1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * NET3 Protocol independent device support routines.
4 *
5 * Derived from the non IP parts of dev.c 1.0.19
6 * Authors: Ross Biro
7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8 * Mark Evans, <evansmp@uhura.aston.ac.uk>
9 *
10 * Additional Authors:
11 * Florian la Roche <rzsfl@rz.uni-sb.de>
12 * Alan Cox <gw4pts@gw4pts.ampr.org>
13 * David Hinds <dahinds@users.sourceforge.net>
14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15 * Adam Sulmicki <adam@cfar.umd.edu>
16 * Pekka Riikonen <priikone@poesidon.pspt.fi>
17 *
18 * Changes:
19 * D.J. Barrow : Fixed bug where dev->refcnt gets set
20 * to 2 if register_netdev gets called
21 * before net_dev_init & also removed a
22 * few lines of code in the process.
23 * Alan Cox : device private ioctl copies fields back.
24 * Alan Cox : Transmit queue code does relevant
25 * stunts to keep the queue safe.
26 * Alan Cox : Fixed double lock.
27 * Alan Cox : Fixed promisc NULL pointer trap
28 * ???????? : Support the full private ioctl range
29 * Alan Cox : Moved ioctl permission check into
30 * drivers
31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
32 * Alan Cox : 100 backlog just doesn't cut it when
33 * you start doing multicast video 8)
34 * Alan Cox : Rewrote net_bh and list manager.
35 * Alan Cox : Fix ETH_P_ALL echoback lengths.
36 * Alan Cox : Took out transmit every packet pass
37 * Saved a few bytes in the ioctl handler
38 * Alan Cox : Network driver sets packet type before
39 * calling netif_rx. Saves a function
40 * call a packet.
41 * Alan Cox : Hashed net_bh()
42 * Richard Kooijman: Timestamp fixes.
43 * Alan Cox : Wrong field in SIOCGIFDSTADDR
44 * Alan Cox : Device lock protection.
45 * Alan Cox : Fixed nasty side effect of device close
46 * changes.
47 * Rudi Cilibrasi : Pass the right thing to
48 * set_mac_address()
49 * Dave Miller : 32bit quantity for the device lock to
50 * make it work out on a Sparc.
51 * Bjorn Ekwall : Added KERNELD hack.
52 * Alan Cox : Cleaned up the backlog initialise.
53 * Craig Metz : SIOCGIFCONF fix if space for under
54 * 1 device.
55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
56 * is no device open function.
57 * Andi Kleen : Fix error reporting for SIOCGIFCONF
58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
59 * Cyrus Durgin : Cleaned for KMOD
60 * Adam Sulmicki : Bug Fix : Network Device Unload
61 * A network device unload needs to purge
62 * the backlog queue.
63 * Paul Rusty Russell : SIOCSIFNAME
64 * Pekka Riikonen : Netdev boot-time settings code
65 * Andrew Morton : Make unregister_netdevice wait
66 * indefinitely on dev->refcnt
67 * J Hadi Salim : - Backlog queue sampling
68 * - netif_rx() feedback
69 */
70
71 #include <linux/uaccess.h>
72 #include <linux/bitmap.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dsa.h>
103 #include <net/dst.h>
104 #include <net/dst_metadata.h>
105 #include <net/gro.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
110 #include <net/tcx.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
126 #include <net/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <trace/events/qdisc.h>
136 #include <trace/events/xdp.h>
137 #include <linux/inetdevice.h>
138 #include <linux/cpu_rmap.h>
139 #include <linux/static_key.h>
140 #include <linux/hashtable.h>
141 #include <linux/vmalloc.h>
142 #include <linux/if_macvlan.h>
143 #include <linux/errqueue.h>
144 #include <linux/hrtimer.h>
145 #include <linux/netfilter_netdev.h>
146 #include <linux/crash_dump.h>
147 #include <linux/sctp.h>
148 #include <net/udp_tunnel.h>
149 #include <linux/net_namespace.h>
150 #include <linux/indirect_call_wrapper.h>
151 #include <net/devlink.h>
152 #include <linux/pm_runtime.h>
153 #include <linux/prandom.h>
154 #include <linux/once_lite.h>
155 #include <net/netdev_rx_queue.h>
156
157 #include "dev.h"
158 #include "net-sysfs.h"
159
160 static DEFINE_SPINLOCK(ptype_lock);
161 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
162 struct list_head ptype_all __read_mostly; /* Taps */
163
164 static int netif_rx_internal(struct sk_buff *skb);
165 static int call_netdevice_notifiers_extack(unsigned long val,
166 struct net_device *dev,
167 struct netlink_ext_ack *extack);
168 static struct napi_struct *napi_by_id(unsigned int napi_id);
169
170 /*
171 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
172 * semaphore.
173 *
174 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
175 *
176 * Writers must hold the rtnl semaphore while they loop through the
177 * dev_base_head list, and hold dev_base_lock for writing when they do the
178 * actual updates. This allows pure readers to access the list even
179 * while a writer is preparing to update it.
180 *
181 * To put it another way, dev_base_lock is held for writing only to
182 * protect against pure readers; the rtnl semaphore provides the
183 * protection against other writers.
184 *
185 * See, for example usages, register_netdevice() and
186 * unregister_netdevice(), which must be called with the rtnl
187 * semaphore held.
188 */
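/* A minimal sketch of the reader side described above; use(dev) is a
 * placeholder for arbitrary read-only work, not a real helper:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		use(dev);
 *	rcu_read_unlock();
 *
 * A writer, by contrast, is expected to hold the rtnl semaphore and to
 * take dev_base_lock for writing around the actual list update.
 */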
189 DEFINE_RWLOCK(dev_base_lock);
190 EXPORT_SYMBOL(dev_base_lock);
191
192 static DEFINE_MUTEX(ifalias_mutex);
193
194 /* protects napi_hash addition/deletion and napi_gen_id */
195 static DEFINE_SPINLOCK(napi_hash_lock);
196
197 static unsigned int napi_gen_id = NR_CPUS;
198 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
199
200 static DECLARE_RWSEM(devnet_rename_sem);
201
202 static inline void dev_base_seq_inc(struct net *net)
203 {
204 while (++net->dev_base_seq == 0)
205 ;
206 }
207
208 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
209 {
210 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
211
212 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
213 }
214
215 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
216 {
217 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
218 }
219
220 static inline void rps_lock_irqsave(struct softnet_data *sd,
221 unsigned long *flags)
222 {
223 if (IS_ENABLED(CONFIG_RPS))
224 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
225 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
226 local_irq_save(*flags);
227 }
228
229 static inline void rps_lock_irq_disable(struct softnet_data *sd)
230 {
231 if (IS_ENABLED(CONFIG_RPS))
232 spin_lock_irq(&sd->input_pkt_queue.lock);
233 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
234 local_irq_disable();
235 }
236
237 static inline void rps_unlock_irq_restore(struct softnet_data *sd,
238 unsigned long *flags)
239 {
240 if (IS_ENABLED(CONFIG_RPS))
241 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
242 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
243 local_irq_restore(*flags);
244 }
245
246 static inline void rps_unlock_irq_enable(struct softnet_data *sd)
247 {
248 if (IS_ENABLED(CONFIG_RPS))
249 spin_unlock_irq(&sd->input_pkt_queue.lock);
250 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
251 local_irq_enable();
252 }
253
254 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
255 const char *name)
256 {
257 struct netdev_name_node *name_node;
258
259 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
260 if (!name_node)
261 return NULL;
262 INIT_HLIST_NODE(&name_node->hlist);
263 name_node->dev = dev;
264 name_node->name = name;
265 return name_node;
266 }
267
268 static struct netdev_name_node *
269 netdev_name_node_head_alloc(struct net_device *dev)
270 {
271 struct netdev_name_node *name_node;
272
273 name_node = netdev_name_node_alloc(dev, dev->name);
274 if (!name_node)
275 return NULL;
276 INIT_LIST_HEAD(&name_node->list);
277 return name_node;
278 }
279
280 static void netdev_name_node_free(struct netdev_name_node *name_node)
281 {
282 kfree(name_node);
283 }
284
285 static void netdev_name_node_add(struct net *net,
286 struct netdev_name_node *name_node)
287 {
288 hlist_add_head_rcu(&name_node->hlist,
289 dev_name_hash(net, name_node->name));
290 }
291
292 static void netdev_name_node_del(struct netdev_name_node *name_node)
293 {
294 hlist_del_rcu(&name_node->hlist);
295 }
296
297 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
298 const char *name)
299 {
300 struct hlist_head *head = dev_name_hash(net, name);
301 struct netdev_name_node *name_node;
302
303 hlist_for_each_entry(name_node, head, hlist)
304 if (!strcmp(name_node->name, name))
305 return name_node;
306 return NULL;
307 }
308
309 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
310 const char *name)
311 {
312 struct hlist_head *head = dev_name_hash(net, name);
313 struct netdev_name_node *name_node;
314
315 hlist_for_each_entry_rcu(name_node, head, hlist)
316 if (!strcmp(name_node->name, name))
317 return name_node;
318 return NULL;
319 }
320
321 bool netdev_name_in_use(struct net *net, const char *name)
322 {
323 return netdev_name_node_lookup(net, name);
324 }
325 EXPORT_SYMBOL(netdev_name_in_use);
326
327 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
328 {
329 struct netdev_name_node *name_node;
330 struct net *net = dev_net(dev);
331
332 name_node = netdev_name_node_lookup(net, name);
333 if (name_node)
334 return -EEXIST;
335 name_node = netdev_name_node_alloc(dev, name);
336 if (!name_node)
337 return -ENOMEM;
338 netdev_name_node_add(net, name_node);
339 /* The node that holds dev->name acts as a head of per-device list. */
340 list_add_tail(&name_node->list, &dev->name_node->list);
341
342 return 0;
343 }
344
345 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
346 {
347 list_del(&name_node->list);
348 kfree(name_node->name);
349 netdev_name_node_free(name_node);
350 }
351
352 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
353 {
354 struct netdev_name_node *name_node;
355 struct net *net = dev_net(dev);
356
357 name_node = netdev_name_node_lookup(net, name);
358 if (!name_node)
359 return -ENOENT;
360 /* lookup might have found our primary name or a name belonging
361 * to another device.
362 */
363 if (name_node == dev->name_node || name_node->dev != dev)
364 return -EINVAL;
365
366 netdev_name_node_del(name_node);
367 synchronize_rcu();
368 __netdev_name_node_alt_destroy(name_node);
369
370 return 0;
371 }
372
373 static void netdev_name_node_alt_flush(struct net_device *dev)
374 {
375 struct netdev_name_node *name_node, *tmp;
376
377 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
378 __netdev_name_node_alt_destroy(name_node);
379 }
380
381 /* Device list insertion */
382 static void list_netdevice(struct net_device *dev)
383 {
384 struct netdev_name_node *name_node;
385 struct net *net = dev_net(dev);
386
387 ASSERT_RTNL();
388
389 write_lock(&dev_base_lock);
390 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
391 netdev_name_node_add(net, dev->name_node);
392 hlist_add_head_rcu(&dev->index_hlist,
393 dev_index_hash(net, dev->ifindex));
394 write_unlock(&dev_base_lock);
395
396 netdev_for_each_altname(dev, name_node)
397 netdev_name_node_add(net, name_node);
398
399 /* We reserved the ifindex, this can't fail */
400 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
401
402 dev_base_seq_inc(net);
403 }
404
405 /* Device list removal
406 * caller must respect an RCU grace period before freeing/reusing dev
407 */
408 static void unlist_netdevice(struct net_device *dev, bool lock)
409 {
410 struct netdev_name_node *name_node;
411 struct net *net = dev_net(dev);
412
413 ASSERT_RTNL();
414
415 xa_erase(&net->dev_by_index, dev->ifindex);
416
417 netdev_for_each_altname(dev, name_node)
418 netdev_name_node_del(name_node);
419
420 /* Unlink dev from the device chain */
421 if (lock)
422 write_lock(&dev_base_lock);
423 list_del_rcu(&dev->dev_list);
424 netdev_name_node_del(dev->name_node);
425 hlist_del_rcu(&dev->index_hlist);
426 if (lock)
427 write_unlock(&dev_base_lock);
428
429 dev_base_seq_inc(dev_net(dev));
430 }
431
432 /*
433 * Our notifier list
434 */
435
436 static RAW_NOTIFIER_HEAD(netdev_chain);
437
438 /*
439 * Device drivers call our routines to queue packets here. We empty the
440 * queue in the local softnet handler.
441 */
442
443 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
444 EXPORT_PER_CPU_SYMBOL(softnet_data);
445
446 #ifdef CONFIG_LOCKDEP
447 /*
448 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
449 * according to dev->type
450 */
451 static const unsigned short netdev_lock_type[] = {
452 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
453 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
454 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
455 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
456 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
457 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
458 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
459 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
460 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
461 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
462 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
463 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
464 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
465 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
466 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
467
468 static const char *const netdev_lock_name[] = {
469 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
470 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
471 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
472 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
473 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
474 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
475 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
476 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
477 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
478 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
479 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
480 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
481 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
482 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
483 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
484
485 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
486 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
487
488 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
489 {
490 int i;
491
492 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
493 if (netdev_lock_type[i] == dev_type)
494 return i;
495 /* the last key is used by default */
496 return ARRAY_SIZE(netdev_lock_type) - 1;
497 }
498
499 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
500 unsigned short dev_type)
501 {
502 int i;
503
504 i = netdev_lock_pos(dev_type);
505 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
506 netdev_lock_name[i]);
507 }
508
509 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
510 {
511 int i;
512
513 i = netdev_lock_pos(dev->type);
514 lockdep_set_class_and_name(&dev->addr_list_lock,
515 &netdev_addr_lock_key[i],
516 netdev_lock_name[i]);
517 }
518 #else
519 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
520 unsigned short dev_type)
521 {
522 }
523
524 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
525 {
526 }
527 #endif
528
529 /*******************************************************************************
530 *
531 * Protocol management and registration routines
532 *
533 *******************************************************************************/
534
535
536 /*
537 * Add a protocol ID to the list. Now that the input handler is
538 * smarter we can dispense with all the messy stuff that used to be
539 * here.
540 *
541 * BEWARE!!! Protocol handlers, mangling input packets,
542 * MUST BE last in hash buckets and checking protocol handlers
543 * MUST start from promiscuous ptype_all chain in net_bh.
544 * It is true now, do not change it.
545 * Explanation follows: if protocol handler, mangling packet, will
546 * be the first on list, it is not able to sense, that packet
547 * is cloned and should be copied-on-write, so that it will
548 * change it and subsequent readers will get broken packet.
549 * --ANK (980803)
550 */
551
552 static inline struct list_head *ptype_head(const struct packet_type *pt)
553 {
554 if (pt->type == htons(ETH_P_ALL))
555 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
556 else
557 return pt->dev ? &pt->dev->ptype_specific :
558 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
559 }
560
561 /**
562 * dev_add_pack - add packet handler
563 * @pt: packet type declaration
564 *
565 * Add a protocol handler to the networking stack. The passed &packet_type
566 * is linked into kernel lists and may not be freed until it has been
567 * removed from the kernel lists.
568 *
569 * This call does not sleep, therefore it cannot guarantee that all
570 * CPUs that are in the middle of receiving packets will see the new
571 * packet type (until the next received packet).
572 */
573
574 void dev_add_pack(struct packet_type *pt)
575 {
576 struct list_head *head = ptype_head(pt);
577
578 spin_lock(&ptype_lock);
579 list_add_rcu(&pt->list, head);
580 spin_unlock(&ptype_lock);
581 }
582 EXPORT_SYMBOL(dev_add_pack);
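/* A minimal usage sketch for dev_add_pack()/dev_remove_pack(); the handler
 * name and body are assumptions made for illustration, only the structure
 * fields and function signatures come from the real API:
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		consume_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_proto __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_proto);
 *	...
 *	dev_remove_pack(&my_proto);	(may sleep; see below)
 */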
583
584 /**
585 * __dev_remove_pack - remove packet handler
586 * @pt: packet type declaration
587 *
588 * Remove a protocol handler that was previously added to the kernel
589 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
590 * from the kernel lists and can be freed or reused once this function
591 * returns.
592 *
593 * The packet type might still be in use by receivers
594 * and must not be freed until after all the CPUs have gone
595 * through a quiescent state.
596 */
597 void __dev_remove_pack(struct packet_type *pt)
598 {
599 struct list_head *head = ptype_head(pt);
600 struct packet_type *pt1;
601
602 spin_lock(&ptype_lock);
603
604 list_for_each_entry(pt1, head, list) {
605 if (pt == pt1) {
606 list_del_rcu(&pt->list);
607 goto out;
608 }
609 }
610
611 pr_warn("dev_remove_pack: %p not found\n", pt);
612 out:
613 spin_unlock(&ptype_lock);
614 }
615 EXPORT_SYMBOL(__dev_remove_pack);
616
617 /**
618 * dev_remove_pack - remove packet handler
619 * @pt: packet type declaration
620 *
621 * Remove a protocol handler that was previously added to the kernel
622 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
623 * from the kernel lists and can be freed or reused once this function
624 * returns.
625 *
626 * This call sleeps to guarantee that no CPU is looking at the packet
627 * type after return.
628 */
629 void dev_remove_pack(struct packet_type *pt)
630 {
631 __dev_remove_pack(pt);
632
633 synchronize_net();
634 }
635 EXPORT_SYMBOL(dev_remove_pack);
636
637
638 /*******************************************************************************
639 *
640 * Device Interface Subroutines
641 *
642 *******************************************************************************/
643
644 /**
645 * dev_get_iflink - get 'iflink' value of an interface
646 * @dev: targeted interface
647 *
648 * Indicates the ifindex the interface is linked to.
649 * Physical interfaces have the same 'ifindex' and 'iflink' values.
650 */
651
652 int dev_get_iflink(const struct net_device *dev)
653 {
654 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
655 return dev->netdev_ops->ndo_get_iflink(dev);
656
657 return dev->ifindex;
658 }
659 EXPORT_SYMBOL(dev_get_iflink);
660
661 /**
662 * dev_fill_metadata_dst - Retrieve tunnel egress information.
663 * @dev: targeted interface
664 * @skb: The packet.
665 *
666 * For better visibility of tunnel traffic, OVS needs to retrieve
667 * egress tunnel information for a packet. The following API allows
668 * the user to get this info.
669 */
670 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
671 {
672 struct ip_tunnel_info *info;
673
674 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
675 return -EINVAL;
676
677 info = skb_tunnel_info_unclone(skb);
678 if (!info)
679 return -ENOMEM;
680 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
681 return -EINVAL;
682
683 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
684 }
685 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
686
687 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
688 {
689 int k = stack->num_paths++;
690
691 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
692 return NULL;
693
694 return &stack->path[k];
695 }
696
697 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
698 struct net_device_path_stack *stack)
699 {
700 const struct net_device *last_dev;
701 struct net_device_path_ctx ctx = {
702 .dev = dev,
703 };
704 struct net_device_path *path;
705 int ret = 0;
706
707 memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
708 stack->num_paths = 0;
709 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
710 last_dev = ctx.dev;
711 path = dev_fwd_path(stack);
712 if (!path)
713 return -1;
714
715 memset(path, 0, sizeof(struct net_device_path));
716 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
717 if (ret < 0)
718 return -1;
719
720 if (WARN_ON_ONCE(last_dev == ctx.dev))
721 return -1;
722 }
723
724 if (!ctx.dev)
725 return ret;
726
727 path = dev_fwd_path(stack);
728 if (!path)
729 return -1;
730 path->type = DEV_PATH_ETHERNET;
731 path->dev = ctx.dev;
732
733 return ret;
734 }
735 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
736
737 /**
738 * __dev_get_by_name - find a device by its name
739 * @net: the applicable net namespace
740 * @name: name to find
741 *
742 * Find an interface by name. Must be called under RTNL semaphore
743 * or @dev_base_lock. If the name is found a pointer to the device
744 * is returned. If the name is not found then %NULL is returned. The
745 * reference counters are not incremented so the caller must be
746 * careful with locks.
747 */
748
749 struct net_device *__dev_get_by_name(struct net *net, const char *name)
750 {
751 struct netdev_name_node *node_name;
752
753 node_name = netdev_name_node_lookup(net, name);
754 return node_name ? node_name->dev : NULL;
755 }
756 EXPORT_SYMBOL(__dev_get_by_name);
757
758 /**
759 * dev_get_by_name_rcu - find a device by its name
760 * @net: the applicable net namespace
761 * @name: name to find
762 *
763 * Find an interface by name.
764 * If the name is found a pointer to the device is returned.
765 * If the name is not found then %NULL is returned.
766 * The reference counters are not incremented so the caller must be
767 * careful with locks. The caller must hold RCU lock.
768 */
769
770 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
771 {
772 struct netdev_name_node *node_name;
773
774 node_name = netdev_name_node_lookup_rcu(net, name);
775 return node_name ? node_name->dev : NULL;
776 }
777 EXPORT_SYMBOL(dev_get_by_name_rcu);
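/* Sketch of typical RCU usage; the interface name and the printk are purely
 * illustrative:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "lo");
 *	if (dev)
 *		pr_info("ifindex=%d\n", dev->ifindex);
 *	rcu_read_unlock();
 *
 * The pointer must not be used after rcu_read_unlock() unless a reference
 * was taken while still inside the read-side section.
 */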
778
779 /* Deprecated for new users, call netdev_get_by_name() instead */
780 struct net_device *dev_get_by_name(struct net *net, const char *name)
781 {
782 struct net_device *dev;
783
784 rcu_read_lock();
785 dev = dev_get_by_name_rcu(net, name);
786 dev_hold(dev);
787 rcu_read_unlock();
788 return dev;
789 }
790 EXPORT_SYMBOL(dev_get_by_name);
791
792 /**
793 * netdev_get_by_name() - find a device by its name
794 * @net: the applicable net namespace
795 * @name: name to find
796 * @tracker: tracking object for the acquired reference
797 * @gfp: allocation flags for the tracker
798 *
799 * Find an interface by name. This can be called from any
800 * context and does its own locking. The returned handle has
801 * the usage count incremented and the caller must use netdev_put() to
802 * release it when it is no longer needed. %NULL is returned if no
803 * matching device is found.
804 */
805 struct net_device *netdev_get_by_name(struct net *net, const char *name,
806 netdevice_tracker *tracker, gfp_t gfp)
807 {
808 struct net_device *dev;
809
810 dev = dev_get_by_name(net, name);
811 if (dev)
812 netdev_tracker_alloc(dev, tracker, gfp);
813 return dev;
814 }
815 EXPORT_SYMBOL(netdev_get_by_name);
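/* Sketch of the refcounted lookup with a tracker; the variable names and the
 * "eth0" string are arbitrary examples, the calls are the documented API:
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
 *	if (dev) {
 *		...
 *		netdev_put(dev, &tracker);
 *	}
 */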
816
817 /**
818 * __dev_get_by_index - find a device by its ifindex
819 * @net: the applicable net namespace
820 * @ifindex: index of device
821 *
822 * Search for an interface by index. Returns %NULL if the device
823 * is not found or a pointer to the device. The device has not
824 * had its reference counter increased so the caller must be careful
825 * about locking. The caller must hold either the RTNL semaphore
826 * or @dev_base_lock.
827 */
828
829 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
830 {
831 struct net_device *dev;
832 struct hlist_head *head = dev_index_hash(net, ifindex);
833
834 hlist_for_each_entry(dev, head, index_hlist)
835 if (dev->ifindex == ifindex)
836 return dev;
837
838 return NULL;
839 }
840 EXPORT_SYMBOL(__dev_get_by_index);
841
842 /**
843 * dev_get_by_index_rcu - find a device by its ifindex
844 * @net: the applicable net namespace
845 * @ifindex: index of device
846 *
847 * Search for an interface by index. Returns %NULL if the device
848 * is not found or a pointer to the device. The device has not
849 * had its reference counter increased so the caller must be careful
850 * about locking. The caller must hold RCU lock.
851 */
852
853 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
854 {
855 struct net_device *dev;
856 struct hlist_head *head = dev_index_hash(net, ifindex);
857
858 hlist_for_each_entry_rcu(dev, head, index_hlist)
859 if (dev->ifindex == ifindex)
860 return dev;
861
862 return NULL;
863 }
864 EXPORT_SYMBOL(dev_get_by_index_rcu);
865
866 /* Deprecated for new users, call netdev_get_by_index() instead */
867 struct net_device *dev_get_by_index(struct net *net, int ifindex)
868 {
869 struct net_device *dev;
870
871 rcu_read_lock();
872 dev = dev_get_by_index_rcu(net, ifindex);
873 dev_hold(dev);
874 rcu_read_unlock();
875 return dev;
876 }
877 EXPORT_SYMBOL(dev_get_by_index);
878
879 /**
880 * netdev_get_by_index() - find a device by its ifindex
881 * @net: the applicable net namespace
882 * @ifindex: index of device
883 * @tracker: tracking object for the acquired reference
884 * @gfp: allocation flags for the tracker
885 *
886 * Search for an interface by index. Returns NULL if the device
887 * is not found or a pointer to the device. The device returned has
888 * had a reference added and the pointer is safe until the user calls
889 * netdev_put() to indicate they have finished with it.
890 */
891 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
892 netdevice_tracker *tracker, gfp_t gfp)
893 {
894 struct net_device *dev;
895
896 dev = dev_get_by_index(net, ifindex);
897 if (dev)
898 netdev_tracker_alloc(dev, tracker, gfp);
899 return dev;
900 }
901 EXPORT_SYMBOL(netdev_get_by_index);
902
903 /**
904 * dev_get_by_napi_id - find a device by napi_id
905 * @napi_id: ID of the NAPI struct
906 *
907 * Search for an interface by NAPI ID. Returns %NULL if the device
908 * is not found or a pointer to the device. The device has not had
909 * its reference counter increased so the caller must be careful
910 * about locking. The caller must hold RCU lock.
911 */
912
913 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
914 {
915 struct napi_struct *napi;
916
917 WARN_ON_ONCE(!rcu_read_lock_held());
918
919 if (napi_id < MIN_NAPI_ID)
920 return NULL;
921
922 napi = napi_by_id(napi_id);
923
924 return napi ? napi->dev : NULL;
925 }
926 EXPORT_SYMBOL(dev_get_by_napi_id);
927
928 /**
929 * netdev_get_name - get a netdevice name, knowing its ifindex.
930 * @net: network namespace
931 * @name: a pointer to the buffer where the name will be stored.
932 * @ifindex: the ifindex of the interface to get the name from.
933 */
934 int netdev_get_name(struct net *net, char *name, int ifindex)
935 {
936 struct net_device *dev;
937 int ret;
938
939 down_read(&devnet_rename_sem);
940 rcu_read_lock();
941
942 dev = dev_get_by_index_rcu(net, ifindex);
943 if (!dev) {
944 ret = -ENODEV;
945 goto out;
946 }
947
948 strcpy(name, dev->name);
949
950 ret = 0;
951 out:
952 rcu_read_unlock();
953 up_read(&devnet_rename_sem);
954 return ret;
955 }
956
957 /**
958 * dev_getbyhwaddr_rcu - find a device by its hardware address
959 * @net: the applicable net namespace
960 * @type: media type of device
961 * @ha: hardware address
962 *
963 * Search for an interface by MAC address. Returns NULL if the device
964 * is not found or a pointer to the device.
965 * The caller must hold RCU or RTNL.
966 * The returned device has not had its ref count increased
967 * and the caller must therefore be careful about locking
968 *
969 */
970
971 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
972 const char *ha)
973 {
974 struct net_device *dev;
975
976 for_each_netdev_rcu(net, dev)
977 if (dev->type == type &&
978 !memcmp(dev->dev_addr, ha, dev->addr_len))
979 return dev;
980
981 return NULL;
982 }
983 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
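/* Sketch: looking up an Ethernet device by MAC address under RCU; the
 * address bytes below are a made-up example value:
 *
 *	static const char ha[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, ha);
 *	if (dev)
 *		...
 *	rcu_read_unlock();
 */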
984
985 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
986 {
987 struct net_device *dev, *ret = NULL;
988
989 rcu_read_lock();
990 for_each_netdev_rcu(net, dev)
991 if (dev->type == type) {
992 dev_hold(dev);
993 ret = dev;
994 break;
995 }
996 rcu_read_unlock();
997 return ret;
998 }
999 EXPORT_SYMBOL(dev_getfirstbyhwtype);
1000
1001 /**
1002 * __dev_get_by_flags - find any device with given flags
1003 * @net: the applicable net namespace
1004 * @if_flags: IFF_* values
1005 * @mask: bitmask of bits in if_flags to check
1006 *
1007 * Search for any interface with the given flags. Returns NULL if a device
1008 * is not found or a pointer to the device. Must be called inside
1009 * rtnl_lock(), and result refcount is unchanged.
1010 */
1011
1012 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1013 unsigned short mask)
1014 {
1015 struct net_device *dev, *ret;
1016
1017 ASSERT_RTNL();
1018
1019 ret = NULL;
1020 for_each_netdev(net, dev) {
1021 if (((dev->flags ^ if_flags) & mask) == 0) {
1022 ret = dev;
1023 break;
1024 }
1025 }
1026 return ret;
1027 }
1028 EXPORT_SYMBOL(__dev_get_by_flags);
1029
1030 /**
1031 * dev_valid_name - check if name is okay for network device
1032 * @name: name string
1033 *
1034 * Network device names need to be valid file names to
1035 * allow sysfs to work. We also disallow any kind of
1036 * whitespace.
1037 */
1038 bool dev_valid_name(const char *name)
1039 {
1040 if (*name == '\0')
1041 return false;
1042 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1043 return false;
1044 if (!strcmp(name, ".") || !strcmp(name, ".."))
1045 return false;
1046
1047 while (*name) {
1048 if (*name == '/' || *name == ':' || isspace(*name))
1049 return false;
1050 name++;
1051 }
1052 return true;
1053 }
1054 EXPORT_SYMBOL(dev_valid_name);
1055
1056 /**
1057 * __dev_alloc_name - allocate a name for a device
1058 * @net: network namespace to allocate the device name in
1059 * @name: name format string
1060 * @buf: scratch buffer and result name string
1061 *
1062 * Passed a format string - eg "lt%d" it will try and find a suitable
1063 * id. It scans list of devices to build up a free map, then chooses
1064 * the first empty slot. The caller must hold the dev_base or rtnl lock
1065 * while allocating the name and adding the device in order to avoid
1066 * duplicates.
1067 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1068 * Returns the number of the unit assigned or a negative errno code.
1069 */
1070
1071 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1072 {
1073 int i = 0;
1074 const char *p;
1075 const int max_netdevices = 8*PAGE_SIZE;
1076 unsigned long *inuse;
1077 struct net_device *d;
1078
1079 if (!dev_valid_name(name))
1080 return -EINVAL;
1081
1082 p = strchr(name, '%');
1083 if (p) {
1084 /*
1085 * Verify the string as this thing may have come from
1086 * the user. There must be exactly one "%d" and no other "%"
1087 * characters.
1088 */
1089 if (p[1] != 'd' || strchr(p + 2, '%'))
1090 return -EINVAL;
1091
1092 /* Use one page as a bit array of possible slots */
1093 inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
1094 if (!inuse)
1095 return -ENOMEM;
1096
1097 for_each_netdev(net, d) {
1098 struct netdev_name_node *name_node;
1099
1100 netdev_for_each_altname(d, name_node) {
1101 if (!sscanf(name_node->name, name, &i))
1102 continue;
1103 if (i < 0 || i >= max_netdevices)
1104 continue;
1105
1106 /* avoid cases where sscanf is not exact inverse of printf */
1107 snprintf(buf, IFNAMSIZ, name, i);
1108 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1109 __set_bit(i, inuse);
1110 }
1111 if (!sscanf(d->name, name, &i))
1112 continue;
1113 if (i < 0 || i >= max_netdevices)
1114 continue;
1115
1116 /* avoid cases where sscanf is not exact inverse of printf */
1117 snprintf(buf, IFNAMSIZ, name, i);
1118 if (!strncmp(buf, d->name, IFNAMSIZ))
1119 __set_bit(i, inuse);
1120 }
1121
1122 i = find_first_zero_bit(inuse, max_netdevices);
1123 bitmap_free(inuse);
1124 }
1125
1126 snprintf(buf, IFNAMSIZ, name, i);
1127 if (!netdev_name_in_use(net, buf))
1128 return i;
1129
1130 /* It is possible to run out of possible slots
1131 * when the name is long and there isn't enough space left
1132 * for the digits, or if all bits are used.
1133 */
1134 return -ENFILE;
1135 }
1136
1137 static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1138 const char *want_name, char *out_name)
1139 {
1140 int ret;
1141
1142 if (!dev_valid_name(want_name))
1143 return -EINVAL;
1144
1145 if (strchr(want_name, '%')) {
1146 ret = __dev_alloc_name(net, want_name, out_name);
1147 return ret < 0 ? ret : 0;
1148 } else if (netdev_name_in_use(net, want_name)) {
1149 return -EEXIST;
1150 } else if (out_name != want_name) {
1151 strscpy(out_name, want_name, IFNAMSIZ);
1152 }
1153
1154 return 0;
1155 }
1156
1157 static int dev_alloc_name_ns(struct net *net,
1158 struct net_device *dev,
1159 const char *name)
1160 {
1161 char buf[IFNAMSIZ];
1162 int ret;
1163
1164 BUG_ON(!net);
1165 ret = __dev_alloc_name(net, name, buf);
1166 if (ret >= 0)
1167 strscpy(dev->name, buf, IFNAMSIZ);
1168 return ret;
1169 }
1170
1171 /**
1172 * dev_alloc_name - allocate a name for a device
1173 * @dev: device
1174 * @name: name format string
1175 *
1176 * Passed a format string - eg "lt%d" it will try and find a suitable
1177 * id. It scans list of devices to build up a free map, then chooses
1178 * the first empty slot. The caller must hold the dev_base or rtnl lock
1179 * while allocating the name and adding the device in order to avoid
1180 * duplicates.
1181 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1182 * Returns the number of the unit assigned or a negative errno code.
1183 */
1184
1185 int dev_alloc_name(struct net_device *dev, const char *name)
1186 {
1187 return dev_alloc_name_ns(dev_net(dev), dev, name);
1188 }
1189 EXPORT_SYMBOL(dev_alloc_name);
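/* Sketch: a driver wanting names of the form "foo0", "foo1", ... might do
 * the following (the "foo%d" template is an example, not a real driver):
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto fail;
 *
 * On success, err is the unit number and dev->name holds e.g. "foo0".
 */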
1190
1191 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1192 const char *name)
1193 {
1194 char buf[IFNAMSIZ];
1195 int ret;
1196
1197 ret = dev_prep_valid_name(net, dev, name, buf);
1198 if (ret >= 0)
1199 strscpy(dev->name, buf, IFNAMSIZ);
1200 return ret;
1201 }
1202
1203 /**
1204 * dev_change_name - change name of a device
1205 * @dev: device
1206 * @newname: name (or format string) must be at least IFNAMSIZ
1207 *
1208 * Change the name of a device. Format strings such as "eth%d"
1209 * can be passed for wildcarding.
1210 */
1211 int dev_change_name(struct net_device *dev, const char *newname)
1212 {
1213 unsigned char old_assign_type;
1214 char oldname[IFNAMSIZ];
1215 int err = 0;
1216 int ret;
1217 struct net *net;
1218
1219 ASSERT_RTNL();
1220 BUG_ON(!dev_net(dev));
1221
1222 net = dev_net(dev);
1223
1224 down_write(&devnet_rename_sem);
1225
1226 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1227 up_write(&devnet_rename_sem);
1228 return 0;
1229 }
1230
1231 memcpy(oldname, dev->name, IFNAMSIZ);
1232
1233 err = dev_get_valid_name(net, dev, newname);
1234 if (err < 0) {
1235 up_write(&devnet_rename_sem);
1236 return err;
1237 }
1238
1239 if (oldname[0] && !strchr(oldname, '%'))
1240 netdev_info(dev, "renamed from %s%s\n", oldname,
1241 dev->flags & IFF_UP ? " (while UP)" : "");
1242
1243 old_assign_type = dev->name_assign_type;
1244 dev->name_assign_type = NET_NAME_RENAMED;
1245
1246 rollback:
1247 ret = device_rename(&dev->dev, dev->name);
1248 if (ret) {
1249 memcpy(dev->name, oldname, IFNAMSIZ);
1250 dev->name_assign_type = old_assign_type;
1251 up_write(&devnet_rename_sem);
1252 return ret;
1253 }
1254
1255 up_write(&devnet_rename_sem);
1256
1257 netdev_adjacent_rename_links(dev, oldname);
1258
1259 write_lock(&dev_base_lock);
1260 netdev_name_node_del(dev->name_node);
1261 write_unlock(&dev_base_lock);
1262
1263 synchronize_rcu();
1264
1265 write_lock(&dev_base_lock);
1266 netdev_name_node_add(net, dev->name_node);
1267 write_unlock(&dev_base_lock);
1268
1269 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1270 ret = notifier_to_errno(ret);
1271
1272 if (ret) {
1273 /* err >= 0 after dev_alloc_name() or stores the first errno */
1274 if (err >= 0) {
1275 err = ret;
1276 down_write(&devnet_rename_sem);
1277 memcpy(dev->name, oldname, IFNAMSIZ);
1278 memcpy(oldname, newname, IFNAMSIZ);
1279 dev->name_assign_type = old_assign_type;
1280 old_assign_type = NET_NAME_RENAMED;
1281 goto rollback;
1282 } else {
1283 netdev_err(dev, "name change rollback failed: %d\n",
1284 ret);
1285 }
1286 }
1287
1288 return err;
1289 }
1290
1291 /**
1292 * dev_set_alias - change ifalias of a device
1293 * @dev: device
1294 * @alias: name up to IFALIASZ
1295 * @len: limit of bytes to copy from info
1296 *
1297 * Set ifalias for a device.
1298 */
1299 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1300 {
1301 struct dev_ifalias *new_alias = NULL;
1302
1303 if (len >= IFALIASZ)
1304 return -EINVAL;
1305
1306 if (len) {
1307 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1308 if (!new_alias)
1309 return -ENOMEM;
1310
1311 memcpy(new_alias->ifalias, alias, len);
1312 new_alias->ifalias[len] = 0;
1313 }
1314
1315 mutex_lock(&ifalias_mutex);
1316 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1317 mutex_is_locked(&ifalias_mutex));
1318 mutex_unlock(&ifalias_mutex);
1319
1320 if (new_alias)
1321 kfree_rcu(new_alias, rcuhead);
1322
1323 return len;
1324 }
1325 EXPORT_SYMBOL(dev_set_alias);
1326
1327 /**
1328 * dev_get_alias - get ifalias of a device
1329 * @dev: device
1330 * @name: buffer to store name of ifalias
1331 * @len: size of buffer
1332 *
1333 * get ifalias for a device. Caller must make sure dev cannot go
1334 * away, e.g. rcu read lock or own a reference count to device.
1335 */
1336 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1337 {
1338 const struct dev_ifalias *alias;
1339 int ret = 0;
1340
1341 rcu_read_lock();
1342 alias = rcu_dereference(dev->ifalias);
1343 if (alias)
1344 ret = snprintf(name, len, "%s", alias->ifalias);
1345 rcu_read_unlock();
1346
1347 return ret;
1348 }
1349
1350 /**
1351 * netdev_features_change - device changes features
1352 * @dev: device to cause notification
1353 *
1354 * Called to indicate a device has changed features.
1355 */
1356 void netdev_features_change(struct net_device *dev)
1357 {
1358 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1359 }
1360 EXPORT_SYMBOL(netdev_features_change);
1361
1362 /**
1363 * netdev_state_change - device changes state
1364 * @dev: device to cause notification
1365 *
1366 * Called to indicate a device has changed state. This function calls
1367 * the notifier chains for netdev_chain and sends a NEWLINK message
1368 * to the routing socket.
1369 */
1370 void netdev_state_change(struct net_device *dev)
1371 {
1372 if (dev->flags & IFF_UP) {
1373 struct netdev_notifier_change_info change_info = {
1374 .info.dev = dev,
1375 };
1376
1377 call_netdevice_notifiers_info(NETDEV_CHANGE,
1378 &change_info.info);
1379 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1380 }
1381 }
1382 EXPORT_SYMBOL(netdev_state_change);
1383
1384 /**
1385 * __netdev_notify_peers - notify network peers about existence of @dev,
1386 * to be called when rtnl lock is already held.
1387 * @dev: network device
1388 *
1389 * Generate traffic such that interested network peers are aware of
1390 * @dev, such as by generating a gratuitous ARP. This may be used when
1391 * a device wants to inform the rest of the network about some sort of
1392 * reconfiguration such as a failover event or virtual machine
1393 * migration.
1394 */
1395 void __netdev_notify_peers(struct net_device *dev)
1396 {
1397 ASSERT_RTNL();
1398 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1399 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1400 }
1401 EXPORT_SYMBOL(__netdev_notify_peers);
1402
1403 /**
1404 * netdev_notify_peers - notify network peers about existence of @dev
1405 * @dev: network device
1406 *
1407 * Generate traffic such that interested network peers are aware of
1408 * @dev, such as by generating a gratuitous ARP. This may be used when
1409 * a device wants to inform the rest of the network about some sort of
1410 * reconfiguration such as a failover event or virtual machine
1411 * migration.
1412 */
1413 void netdev_notify_peers(struct net_device *dev)
1414 {
1415 rtnl_lock();
1416 __netdev_notify_peers(dev);
1417 rtnl_unlock();
1418 }
1419 EXPORT_SYMBOL(netdev_notify_peers);
1420
1421 static int napi_threaded_poll(void *data);
1422
1423 static int napi_kthread_create(struct napi_struct *n)
1424 {
1425 int err = 0;
1426
1427 /* Create and wake up the kthread once to put it in
1428 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1429 * warning and work with loadavg.
1430 */
1431 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1432 n->dev->name, n->napi_id);
1433 if (IS_ERR(n->thread)) {
1434 err = PTR_ERR(n->thread);
1435 pr_err("kthread_run failed with err %d\n", err);
1436 n->thread = NULL;
1437 }
1438
1439 return err;
1440 }
1441
1442 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1443 {
1444 const struct net_device_ops *ops = dev->netdev_ops;
1445 int ret;
1446
1447 ASSERT_RTNL();
1448 dev_addr_check(dev);
1449
1450 if (!netif_device_present(dev)) {
1451 /* may be detached because parent is runtime-suspended */
1452 if (dev->dev.parent)
1453 pm_runtime_resume(dev->dev.parent);
1454 if (!netif_device_present(dev))
1455 return -ENODEV;
1456 }
1457
1458 /* Block netpoll from trying to do any rx path servicing.
1459 * If we don't do this there is a chance ndo_poll_controller
1460 * or ndo_poll may be running while we open the device
1461 */
1462 netpoll_poll_disable(dev);
1463
1464 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1465 ret = notifier_to_errno(ret);
1466 if (ret)
1467 return ret;
1468
1469 set_bit(__LINK_STATE_START, &dev->state);
1470
1471 if (ops->ndo_validate_addr)
1472 ret = ops->ndo_validate_addr(dev);
1473
1474 if (!ret && ops->ndo_open)
1475 ret = ops->ndo_open(dev);
1476
1477 netpoll_poll_enable(dev);
1478
1479 if (ret)
1480 clear_bit(__LINK_STATE_START, &dev->state);
1481 else {
1482 dev->flags |= IFF_UP;
1483 dev_set_rx_mode(dev);
1484 dev_activate(dev);
1485 add_device_randomness(dev->dev_addr, dev->addr_len);
1486 }
1487
1488 return ret;
1489 }
1490
1491 /**
1492 * dev_open - prepare an interface for use.
1493 * @dev: device to open
1494 * @extack: netlink extended ack
1495 *
1496 * Takes a device from down to up state. The device's private open
1497 * function is invoked and then the multicast lists are loaded. Finally
1498 * the device is moved into the up state and a %NETDEV_UP message is
1499 * sent to the netdev notifier chain.
1500 *
1501 * Calling this function on an active interface is a nop. On a failure
1502 * a negative errno code is returned.
1503 */
1504 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1505 {
1506 int ret;
1507
1508 if (dev->flags & IFF_UP)
1509 return 0;
1510
1511 ret = __dev_open(dev, extack);
1512 if (ret < 0)
1513 return ret;
1514
1515 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1516 call_netdevice_notifiers(NETDEV_UP, dev);
1517
1518 return ret;
1519 }
1520 EXPORT_SYMBOL(dev_open);
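/* Sketch: bringing an interface up from kernel code. dev_open() expects the
 * rtnl lock to be held, and the extack argument may be NULL:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	rtnl_unlock();
 */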
1521
1522 static void __dev_close_many(struct list_head *head)
1523 {
1524 struct net_device *dev;
1525
1526 ASSERT_RTNL();
1527 might_sleep();
1528
1529 list_for_each_entry(dev, head, close_list) {
1530 /* Temporarily disable netpoll until the interface is down */
1531 netpoll_poll_disable(dev);
1532
1533 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1534
1535 clear_bit(__LINK_STATE_START, &dev->state);
1536
1537 /* Synchronize to scheduled poll. We cannot touch poll list, it
1538 * can be even on different cpu. So just clear netif_running().
1539 *
1540 * dev->stop() will invoke napi_disable() on all of its
1541 * napi_struct instances on this device.
1542 */
1543 smp_mb__after_atomic(); /* Commit netif_running(). */
1544 }
1545
1546 dev_deactivate_many(head);
1547
1548 list_for_each_entry(dev, head, close_list) {
1549 const struct net_device_ops *ops = dev->netdev_ops;
1550
1551 /*
1552 * Call the device specific close. This cannot fail.
1553 * Only if device is UP
1554 *
1555 * We allow it to be called even after a DETACH hot-plug
1556 * event.
1557 */
1558 if (ops->ndo_stop)
1559 ops->ndo_stop(dev);
1560
1561 dev->flags &= ~IFF_UP;
1562 netpoll_poll_enable(dev);
1563 }
1564 }
1565
1566 static void __dev_close(struct net_device *dev)
1567 {
1568 LIST_HEAD(single);
1569
1570 list_add(&dev->close_list, &single);
1571 __dev_close_many(&single);
1572 list_del(&single);
1573 }
1574
1575 void dev_close_many(struct list_head *head, bool unlink)
1576 {
1577 struct net_device *dev, *tmp;
1578
1579 /* Remove the devices that don't need to be closed */
1580 list_for_each_entry_safe(dev, tmp, head, close_list)
1581 if (!(dev->flags & IFF_UP))
1582 list_del_init(&dev->close_list);
1583
1584 __dev_close_many(head);
1585
1586 list_for_each_entry_safe(dev, tmp, head, close_list) {
1587 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1588 call_netdevice_notifiers(NETDEV_DOWN, dev);
1589 if (unlink)
1590 list_del_init(&dev->close_list);
1591 }
1592 }
1593 EXPORT_SYMBOL(dev_close_many);
1594
1595 /**
1596 * dev_close - shutdown an interface.
1597 * @dev: device to shutdown
1598 *
1599 * This function moves an active device into down state. A
1600 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1601 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1602 * chain.
1603 */
1604 void dev_close(struct net_device *dev)
1605 {
1606 if (dev->flags & IFF_UP) {
1607 LIST_HEAD(single);
1608
1609 list_add(&dev->close_list, &single);
1610 dev_close_many(&single, true);
1611 list_del(&single);
1612 }
1613 }
1614 EXPORT_SYMBOL(dev_close);
1615
1616
1617 /**
1618 * dev_disable_lro - disable Large Receive Offload on a device
1619 * @dev: device
1620 *
1621 * Disable Large Receive Offload (LRO) on a net device. Must be
1622 * called under RTNL. This is needed if received packets may be
1623 * forwarded to another interface.
1624 */
1625 void dev_disable_lro(struct net_device *dev)
1626 {
1627 struct net_device *lower_dev;
1628 struct list_head *iter;
1629
1630 dev->wanted_features &= ~NETIF_F_LRO;
1631 netdev_update_features(dev);
1632
1633 if (unlikely(dev->features & NETIF_F_LRO))
1634 netdev_WARN(dev, "failed to disable LRO!\n");
1635
1636 netdev_for_each_lower_dev(dev, lower_dev, iter)
1637 dev_disable_lro(lower_dev);
1638 }
1639 EXPORT_SYMBOL(dev_disable_lro);
1640
1641 /**
1642 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1643 * @dev: device
1644 *
1645 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1646 * called under RTNL. This is needed if Generic XDP is installed on
1647 * the device.
1648 */
1649 static void dev_disable_gro_hw(struct net_device *dev)
1650 {
1651 dev->wanted_features &= ~NETIF_F_GRO_HW;
1652 netdev_update_features(dev);
1653
1654 if (unlikely(dev->features & NETIF_F_GRO_HW))
1655 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1656 }
1657
1658 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1659 {
1660 #define N(val) \
1661 case NETDEV_##val: \
1662 return "NETDEV_" __stringify(val);
1663 switch (cmd) {
1664 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1665 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1666 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1667 N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1668 N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1669 N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1670 N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1671 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1672 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1673 N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1674 N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1675 N(XDP_FEAT_CHANGE)
1676 }
1677 #undef N
1678 return "UNKNOWN_NETDEV_EVENT";
1679 }
1680 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1681
1682 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1683 struct net_device *dev)
1684 {
1685 struct netdev_notifier_info info = {
1686 .dev = dev,
1687 };
1688
1689 return nb->notifier_call(nb, val, &info);
1690 }
1691
1692 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1693 struct net_device *dev)
1694 {
1695 int err;
1696
1697 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1698 err = notifier_to_errno(err);
1699 if (err)
1700 return err;
1701
1702 if (!(dev->flags & IFF_UP))
1703 return 0;
1704
1705 call_netdevice_notifier(nb, NETDEV_UP, dev);
1706 return 0;
1707 }
1708
1709 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1710 struct net_device *dev)
1711 {
1712 if (dev->flags & IFF_UP) {
1713 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1714 dev);
1715 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1716 }
1717 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1718 }
1719
1720 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1721 struct net *net)
1722 {
1723 struct net_device *dev;
1724 int err;
1725
1726 for_each_netdev(net, dev) {
1727 err = call_netdevice_register_notifiers(nb, dev);
1728 if (err)
1729 goto rollback;
1730 }
1731 return 0;
1732
1733 rollback:
1734 for_each_netdev_continue_reverse(net, dev)
1735 call_netdevice_unregister_notifiers(nb, dev);
1736 return err;
1737 }
1738
1739 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1740 struct net *net)
1741 {
1742 struct net_device *dev;
1743
1744 for_each_netdev(net, dev)
1745 call_netdevice_unregister_notifiers(nb, dev);
1746 }
1747
1748 static int dev_boot_phase = 1;
1749
1750 /**
1751 * register_netdevice_notifier - register a network notifier block
1752 * @nb: notifier
1753 *
1754 * Register a notifier to be called when network device events occur.
1755 * The notifier passed is linked into the kernel structures and must
1756 * not be reused until it has been unregistered. A negative errno code
1757 * is returned on a failure.
1758 *
1759 * When registered, all registration and up events are replayed
1760 * to the new notifier to allow it to have a race-free
1761 * view of the network device list.
1762 */
1763
1764 int register_netdevice_notifier(struct notifier_block *nb)
1765 {
1766 struct net *net;
1767 int err;
1768
1769 /* Close race with setup_net() and cleanup_net() */
1770 down_write(&pernet_ops_rwsem);
1771 rtnl_lock();
1772 err = raw_notifier_chain_register(&netdev_chain, nb);
1773 if (err)
1774 goto unlock;
1775 if (dev_boot_phase)
1776 goto unlock;
1777 for_each_net(net) {
1778 err = call_netdevice_register_net_notifiers(nb, net);
1779 if (err)
1780 goto rollback;
1781 }
1782
1783 unlock:
1784 rtnl_unlock();
1785 up_write(&pernet_ops_rwsem);
1786 return err;
1787
1788 rollback:
1789 for_each_net_continue_reverse(net)
1790 call_netdevice_unregister_net_notifiers(nb, net);
1791
1792 raw_notifier_chain_unregister(&netdev_chain, nb);
1793 goto unlock;
1794 }
1795 EXPORT_SYMBOL(register_netdevice_notifier);
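/*
 * Illustrative sketch (hypothetical module, not from this file): a typical
 * way to consume these notifications; foo_netdev_event() and foo_nb are
 * made-up names.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "interface is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_nb);
 */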
1796
1797 /**
1798 * unregister_netdevice_notifier - unregister a network notifier block
1799 * @nb: notifier
1800 *
1801 * Unregister a notifier previously registered by
1802 * register_netdevice_notifier(). The notifier is unlinked from the
1803 * kernel structures and may then be reused. A negative errno code
1804 * is returned on a failure.
1805 *
1806 * After unregistering, unregister and down device events are synthesized
1807 * for all devices on the device list to the removed notifier to remove
1808 * the need for special case cleanup code.
1809 */
1810
1811 int unregister_netdevice_notifier(struct notifier_block *nb)
1812 {
1813 struct net *net;
1814 int err;
1815
1816 /* Close race with setup_net() and cleanup_net() */
1817 down_write(&pernet_ops_rwsem);
1818 rtnl_lock();
1819 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1820 if (err)
1821 goto unlock;
1822
1823 for_each_net(net)
1824 call_netdevice_unregister_net_notifiers(nb, net);
1825
1826 unlock:
1827 rtnl_unlock();
1828 up_write(&pernet_ops_rwsem);
1829 return err;
1830 }
1831 EXPORT_SYMBOL(unregister_netdevice_notifier);
1832
1833 static int __register_netdevice_notifier_net(struct net *net,
1834 struct notifier_block *nb,
1835 bool ignore_call_fail)
1836 {
1837 int err;
1838
1839 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1840 if (err)
1841 return err;
1842 if (dev_boot_phase)
1843 return 0;
1844
1845 err = call_netdevice_register_net_notifiers(nb, net);
1846 if (err && !ignore_call_fail)
1847 goto chain_unregister;
1848
1849 return 0;
1850
1851 chain_unregister:
1852 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1853 return err;
1854 }
1855
1856 static int __unregister_netdevice_notifier_net(struct net *net,
1857 struct notifier_block *nb)
1858 {
1859 int err;
1860
1861 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1862 if (err)
1863 return err;
1864
1865 call_netdevice_unregister_net_notifiers(nb, net);
1866 return 0;
1867 }
1868
1869 /**
1870 * register_netdevice_notifier_net - register a per-netns network notifier block
1871 * @net: network namespace
1872 * @nb: notifier
1873 *
1874 * Register a notifier to be called when network device events occur.
1875 * The notifier passed is linked into the kernel structures and must
1876 * not be reused until it has been unregistered. A negative errno code
1877 * is returned on a failure.
1878 *
1879 * When registered, all registration and up events are replayed
1880 * to the new notifier to allow it to have a race-free
1881 * view of the network device list.
1882 */
1883
1884 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1885 {
1886 int err;
1887
1888 rtnl_lock();
1889 err = __register_netdevice_notifier_net(net, nb, false);
1890 rtnl_unlock();
1891 return err;
1892 }
1893 EXPORT_SYMBOL(register_netdevice_notifier_net);
1894
1895 /**
1896 * unregister_netdevice_notifier_net - unregister a per-netns
1897 * network notifier block
1898 * @net: network namespace
1899 * @nb: notifier
1900 *
1901 * Unregister a notifier previously registered by
1902 * register_netdevice_notifier_net(). The notifier is unlinked from the
1903 * kernel structures and may then be reused. A negative errno code
1904 * is returned on a failure.
1905 *
1906 * After unregistering, unregister and down device events are synthesized
1907 * for all devices on the device list to the removed notifier to remove
1908 * the need for special case cleanup code.
1909 */
1910
1911 int unregister_netdevice_notifier_net(struct net *net,
1912 struct notifier_block *nb)
1913 {
1914 int err;
1915
1916 rtnl_lock();
1917 err = __unregister_netdevice_notifier_net(net, nb);
1918 rtnl_unlock();
1919 return err;
1920 }
1921 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1922
1923 static void __move_netdevice_notifier_net(struct net *src_net,
1924 struct net *dst_net,
1925 struct notifier_block *nb)
1926 {
1927 __unregister_netdevice_notifier_net(src_net, nb);
1928 __register_netdevice_notifier_net(dst_net, nb, true);
1929 }
1930
1931 int register_netdevice_notifier_dev_net(struct net_device *dev,
1932 struct notifier_block *nb,
1933 struct netdev_net_notifier *nn)
1934 {
1935 int err;
1936
1937 rtnl_lock();
1938 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1939 if (!err) {
1940 nn->nb = nb;
1941 list_add(&nn->list, &dev->net_notifier_list);
1942 }
1943 rtnl_unlock();
1944 return err;
1945 }
1946 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1947
1948 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1949 struct notifier_block *nb,
1950 struct netdev_net_notifier *nn)
1951 {
1952 int err;
1953
1954 rtnl_lock();
1955 list_del(&nn->list);
1956 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1957 rtnl_unlock();
1958 return err;
1959 }
1960 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1961
1962 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1963 struct net *net)
1964 {
1965 struct netdev_net_notifier *nn;
1966
1967 list_for_each_entry(nn, &dev->net_notifier_list, list)
1968 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1969 }
1970
1971 /**
1972 * call_netdevice_notifiers_info - call all network notifier blocks
1973 * @val: value passed unmodified to notifier function
1974 * @info: notifier information data
1975 *
1976 * Call all network notifier blocks. Parameters and return value
1977 * are as for raw_notifier_call_chain().
1978 */
1979
1980 int call_netdevice_notifiers_info(unsigned long val,
1981 struct netdev_notifier_info *info)
1982 {
1983 struct net *net = dev_net(info->dev);
1984 int ret;
1985
1986 ASSERT_RTNL();
1987
1988 /* Run per-netns notifier block chain first, then run the global one.
1989 * Hopefully, one day, the global one is going to be removed after
1990 * all notifier block registrants get converted to be per-netns.
1991 */
1992 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1993 if (ret & NOTIFY_STOP_MASK)
1994 return ret;
1995 return raw_notifier_call_chain(&netdev_chain, val, info);
1996 }
1997
1998 /**
1999 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
2000 * and roll back on error
2001 * @val_up: value passed unmodified to notifier function
2002 * @val_down: value passed unmodified to the notifier function when
2003 * recovering from an error on @val_up
2004 * @info: notifier information data
2005 *
2006 * Call all per-netns network notifier blocks, but not notifier blocks on
2007 * the global notifier chain. Parameters and return value are as for
2008 * raw_notifier_call_chain_robust().
2009 */
2010
2011 static int
2012 call_netdevice_notifiers_info_robust(unsigned long val_up,
2013 unsigned long val_down,
2014 struct netdev_notifier_info *info)
2015 {
2016 struct net *net = dev_net(info->dev);
2017
2018 ASSERT_RTNL();
2019
2020 return raw_notifier_call_chain_robust(&net->netdev_chain,
2021 val_up, val_down, info);
2022 }
2023
2024 static int call_netdevice_notifiers_extack(unsigned long val,
2025 struct net_device *dev,
2026 struct netlink_ext_ack *extack)
2027 {
2028 struct netdev_notifier_info info = {
2029 .dev = dev,
2030 .extack = extack,
2031 };
2032
2033 return call_netdevice_notifiers_info(val, &info);
2034 }
2035
2036 /**
2037 * call_netdevice_notifiers - call all network notifier blocks
2038 * @val: value passed unmodified to notifier function
2039 * @dev: net_device pointer passed unmodified to notifier function
2040 *
2041 * Call all network notifier blocks. Parameters and return value
2042 * are as for raw_notifier_call_chain().
2043 */
2044
2045 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2046 {
2047 return call_netdevice_notifiers_extack(val, dev, NULL);
2048 }
2049 EXPORT_SYMBOL(call_netdevice_notifiers);
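/*
 * Illustrative sketch (not from this file): code that changes device state
 * under RTNL typically informs the rest of the stack through this helper,
 * for example after updating the hardware address; new_addr is a
 * hypothetical buffer and dev_addr_set() is assumed as in recent kernels.
 *
 *	ASSERT_RTNL();
 *	dev_addr_set(dev, new_addr);
 *	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 */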
2050
2051 /**
2052 * call_netdevice_notifiers_mtu - call all network notifier blocks
2053 * @val: value passed unmodified to notifier function
2054 * @dev: net_device pointer passed unmodified to notifier function
2055 * @arg: additional u32 argument passed to the notifier function
2056 *
2057 * Call all network notifier blocks. Parameters and return value
2058 * are as for raw_notifier_call_chain().
2059 */
2060 static int call_netdevice_notifiers_mtu(unsigned long val,
2061 struct net_device *dev, u32 arg)
2062 {
2063 struct netdev_notifier_info_ext info = {
2064 .info.dev = dev,
2065 .ext.mtu = arg,
2066 };
2067
2068 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2069
2070 return call_netdevice_notifiers_info(val, &info.info);
2071 }
2072
2073 #ifdef CONFIG_NET_INGRESS
2074 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2075
2076 void net_inc_ingress_queue(void)
2077 {
2078 static_branch_inc(&ingress_needed_key);
2079 }
2080 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2081
2082 void net_dec_ingress_queue(void)
2083 {
2084 static_branch_dec(&ingress_needed_key);
2085 }
2086 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2087 #endif
2088
2089 #ifdef CONFIG_NET_EGRESS
2090 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2091
2092 void net_inc_egress_queue(void)
2093 {
2094 static_branch_inc(&egress_needed_key);
2095 }
2096 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2097
2098 void net_dec_egress_queue(void)
2099 {
2100 static_branch_dec(&egress_needed_key);
2101 }
2102 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2103 #endif
2104
2105 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2106 EXPORT_SYMBOL(netstamp_needed_key);
2107 #ifdef CONFIG_JUMP_LABEL
2108 static atomic_t netstamp_needed_deferred;
2109 static atomic_t netstamp_wanted;
2110 static void netstamp_clear(struct work_struct *work)
2111 {
2112 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2113 int wanted;
2114
2115 wanted = atomic_add_return(deferred, &netstamp_wanted);
2116 if (wanted > 0)
2117 static_branch_enable(&netstamp_needed_key);
2118 else
2119 static_branch_disable(&netstamp_needed_key);
2120 }
2121 static DECLARE_WORK(netstamp_work, netstamp_clear);
2122 #endif
2123
2124 void net_enable_timestamp(void)
2125 {
2126 #ifdef CONFIG_JUMP_LABEL
2127 int wanted = atomic_read(&netstamp_wanted);
2128
2129 while (wanted > 0) {
2130 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2131 return;
2132 }
2133 atomic_inc(&netstamp_needed_deferred);
2134 schedule_work(&netstamp_work);
2135 #else
2136 static_branch_inc(&netstamp_needed_key);
2137 #endif
2138 }
2139 EXPORT_SYMBOL(net_enable_timestamp);
2140
2141 void net_disable_timestamp(void)
2142 {
2143 #ifdef CONFIG_JUMP_LABEL
2144 int wanted = atomic_read(&netstamp_wanted);
2145
2146 while (wanted > 1) {
2147 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2148 return;
2149 }
2150 atomic_dec(&netstamp_needed_deferred);
2151 schedule_work(&netstamp_work);
2152 #else
2153 static_branch_dec(&netstamp_needed_key);
2154 #endif
2155 }
2156 EXPORT_SYMBOL(net_disable_timestamp);
2157
2158 static inline void net_timestamp_set(struct sk_buff *skb)
2159 {
2160 skb->tstamp = 0;
2161 skb->mono_delivery_time = 0;
2162 if (static_branch_unlikely(&netstamp_needed_key))
2163 skb->tstamp = ktime_get_real();
2164 }
2165
2166 #define net_timestamp_check(COND, SKB) \
2167 if (static_branch_unlikely(&netstamp_needed_key)) { \
2168 if ((COND) && !(SKB)->tstamp) \
2169 (SKB)->tstamp = ktime_get_real(); \
2170 } \
2171
2172 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2173 {
2174 return __is_skb_forwardable(dev, skb, true);
2175 }
2176 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2177
2178 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2179 bool check_mtu)
2180 {
2181 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2182
2183 if (likely(!ret)) {
2184 skb->protocol = eth_type_trans(skb, dev);
2185 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2186 }
2187
2188 return ret;
2189 }
2190
2191 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2192 {
2193 return __dev_forward_skb2(dev, skb, true);
2194 }
2195 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2196
2197 /**
2198 * dev_forward_skb - loopback an skb to another netif
2199 *
2200 * @dev: destination network device
2201 * @skb: buffer to forward
2202 *
2203 * return values:
2204 * NET_RX_SUCCESS (no congestion)
2205 * NET_RX_DROP (packet was dropped, but freed)
2206 *
2207 * dev_forward_skb can be used for injecting an skb from the
2208 * start_xmit function of one device into the receive queue
2209 * of another device.
2210 *
2211 * The receiving device may be in another namespace, so
2212 * we have to clear all information in the skb that could
2213 * impact namespace isolation.
2214 */
2215 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2216 {
2217 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2218 }
2219 EXPORT_SYMBOL_GPL(dev_forward_skb);
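/*
 * Illustrative sketch (hypothetical veth-like driver): the transmit routine
 * of one device hands frames to its peer's receive path; foo_get_peer() is
 * a made-up helper. dev_forward_skb() consumes the skb in both cases.
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = foo_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */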
2220
2221 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2222 {
2223 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2224 }
2225
2226 static inline int deliver_skb(struct sk_buff *skb,
2227 struct packet_type *pt_prev,
2228 struct net_device *orig_dev)
2229 {
2230 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2231 return -ENOMEM;
2232 refcount_inc(&skb->users);
2233 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2234 }
2235
2236 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2237 struct packet_type **pt,
2238 struct net_device *orig_dev,
2239 __be16 type,
2240 struct list_head *ptype_list)
2241 {
2242 struct packet_type *ptype, *pt_prev = *pt;
2243
2244 list_for_each_entry_rcu(ptype, ptype_list, list) {
2245 if (ptype->type != type)
2246 continue;
2247 if (pt_prev)
2248 deliver_skb(skb, pt_prev, orig_dev);
2249 pt_prev = ptype;
2250 }
2251 *pt = pt_prev;
2252 }
2253
2254 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2255 {
2256 if (!ptype->af_packet_priv || !skb->sk)
2257 return false;
2258
2259 if (ptype->id_match)
2260 return ptype->id_match(ptype, skb->sk);
2261 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2262 return true;
2263
2264 return false;
2265 }
2266
2267 /**
2268 * dev_nit_active - return true if any network interface taps are in use
2269 *
2270 * @dev: network device to check for the presence of taps
2271 */
2272 bool dev_nit_active(struct net_device *dev)
2273 {
2274 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2275 }
2276 EXPORT_SYMBOL_GPL(dev_nit_active);
2277
2278 /*
2279 * Support routine. Sends outgoing frames to any network
2280 * taps currently in use.
2281 */
2282
2283 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2284 {
2285 struct packet_type *ptype;
2286 struct sk_buff *skb2 = NULL;
2287 struct packet_type *pt_prev = NULL;
2288 struct list_head *ptype_list = &ptype_all;
2289
2290 rcu_read_lock();
2291 again:
2292 list_for_each_entry_rcu(ptype, ptype_list, list) {
2293 if (READ_ONCE(ptype->ignore_outgoing))
2294 continue;
2295
2296 /* Never send packets back to the socket
2297 * they originated from - MvS (miquels@drinkel.ow.org)
2298 */
2299 if (skb_loop_sk(ptype, skb))
2300 continue;
2301
2302 if (pt_prev) {
2303 deliver_skb(skb2, pt_prev, skb->dev);
2304 pt_prev = ptype;
2305 continue;
2306 }
2307
2308 /* need to clone skb, done only once */
2309 skb2 = skb_clone(skb, GFP_ATOMIC);
2310 if (!skb2)
2311 goto out_unlock;
2312
2313 net_timestamp_set(skb2);
2314
2315 /* skb->nh should be correctly
2316 * set by sender, so that the second statement is
2317 * just protection against buggy protocols.
2318 */
2319 skb_reset_mac_header(skb2);
2320
2321 if (skb_network_header(skb2) < skb2->data ||
2322 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2323 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2324 ntohs(skb2->protocol),
2325 dev->name);
2326 skb_reset_network_header(skb2);
2327 }
2328
2329 skb2->transport_header = skb2->network_header;
2330 skb2->pkt_type = PACKET_OUTGOING;
2331 pt_prev = ptype;
2332 }
2333
2334 if (ptype_list == &ptype_all) {
2335 ptype_list = &dev->ptype_all;
2336 goto again;
2337 }
2338 out_unlock:
2339 if (pt_prev) {
2340 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2341 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2342 else
2343 kfree_skb(skb2);
2344 }
2345 rcu_read_unlock();
2346 }
2347 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2348
2349 /**
2350 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2351 * @dev: Network device
2352 * @txq: number of queues available
2353 *
2354 * If real_num_tx_queues is changed the tc mappings may no longer be
2355 * valid. To resolve this, verify that each tc mapping remains valid and,
2356 * if not, zero the mapping. Once no priorities map to an offset/count
2357 * pair, that pair is no longer used. In the worst case, when TC0 itself
2358 * is invalid, nothing can be done, so priority mappings are disabled. It
2359 * is expected that drivers will fix this mapping, if they can, before
2360 * calling netif_set_real_num_tx_queues.
2361 */
2362 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2363 {
2364 int i;
2365 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2366
2367 /* If TC0 is invalidated disable TC mapping */
2368 if (tc->offset + tc->count > txq) {
2369 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2370 dev->num_tc = 0;
2371 return;
2372 }
2373
2374 /* Invalidated prio to tc mappings set to TC0 */
2375 for (i = 1; i < TC_BITMASK + 1; i++) {
2376 int q = netdev_get_prio_tc_map(dev, i);
2377
2378 tc = &dev->tc_to_txq[q];
2379 if (tc->offset + tc->count > txq) {
2380 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2381 i, q);
2382 netdev_set_prio_tc_map(dev, i, 0);
2383 }
2384 }
2385 }
2386
2387 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2388 {
2389 if (dev->num_tc) {
2390 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2391 int i;
2392
2393 /* walk through the TCs and see if it falls into any of them */
2394 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2395 if ((txq - tc->offset) < tc->count)
2396 return i;
2397 }
2398
2399 /* didn't find it, just return -1 to indicate no match */
2400 return -1;
2401 }
2402
2403 return 0;
2404 }
2405 EXPORT_SYMBOL(netdev_txq_to_tc);
2406
2407 #ifdef CONFIG_XPS
2408 static struct static_key xps_needed __read_mostly;
2409 static struct static_key xps_rxqs_needed __read_mostly;
2410 static DEFINE_MUTEX(xps_map_mutex);
2411 #define xmap_dereference(P) \
2412 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2413
2414 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2415 struct xps_dev_maps *old_maps, int tci, u16 index)
2416 {
2417 struct xps_map *map = NULL;
2418 int pos;
2419
2420 map = xmap_dereference(dev_maps->attr_map[tci]);
2421 if (!map)
2422 return false;
2423
2424 for (pos = map->len; pos--;) {
2425 if (map->queues[pos] != index)
2426 continue;
2427
2428 if (map->len > 1) {
2429 map->queues[pos] = map->queues[--map->len];
2430 break;
2431 }
2432
2433 if (old_maps)
2434 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2435 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2436 kfree_rcu(map, rcu);
2437 return false;
2438 }
2439
2440 return true;
2441 }
2442
2443 static bool remove_xps_queue_cpu(struct net_device *dev,
2444 struct xps_dev_maps *dev_maps,
2445 int cpu, u16 offset, u16 count)
2446 {
2447 int num_tc = dev_maps->num_tc;
2448 bool active = false;
2449 int tci;
2450
2451 for (tci = cpu * num_tc; num_tc--; tci++) {
2452 int i, j;
2453
2454 for (i = count, j = offset; i--; j++) {
2455 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2456 break;
2457 }
2458
2459 active |= i < 0;
2460 }
2461
2462 return active;
2463 }
2464
2465 static void reset_xps_maps(struct net_device *dev,
2466 struct xps_dev_maps *dev_maps,
2467 enum xps_map_type type)
2468 {
2469 static_key_slow_dec_cpuslocked(&xps_needed);
2470 if (type == XPS_RXQS)
2471 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2472
2473 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2474
2475 kfree_rcu(dev_maps, rcu);
2476 }
2477
2478 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2479 u16 offset, u16 count)
2480 {
2481 struct xps_dev_maps *dev_maps;
2482 bool active = false;
2483 int i, j;
2484
2485 dev_maps = xmap_dereference(dev->xps_maps[type]);
2486 if (!dev_maps)
2487 return;
2488
2489 for (j = 0; j < dev_maps->nr_ids; j++)
2490 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2491 if (!active)
2492 reset_xps_maps(dev, dev_maps, type);
2493
2494 if (type == XPS_CPUS) {
2495 for (i = offset + (count - 1); count--; i--)
2496 netdev_queue_numa_node_write(
2497 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2498 }
2499 }
2500
2501 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2502 u16 count)
2503 {
2504 if (!static_key_false(&xps_needed))
2505 return;
2506
2507 cpus_read_lock();
2508 mutex_lock(&xps_map_mutex);
2509
2510 if (static_key_false(&xps_rxqs_needed))
2511 clean_xps_maps(dev, XPS_RXQS, offset, count);
2512
2513 clean_xps_maps(dev, XPS_CPUS, offset, count);
2514
2515 mutex_unlock(&xps_map_mutex);
2516 cpus_read_unlock();
2517 }
2518
2519 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2520 {
2521 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2522 }
2523
2524 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2525 u16 index, bool is_rxqs_map)
2526 {
2527 struct xps_map *new_map;
2528 int alloc_len = XPS_MIN_MAP_ALLOC;
2529 int i, pos;
2530
2531 for (pos = 0; map && pos < map->len; pos++) {
2532 if (map->queues[pos] != index)
2533 continue;
2534 return map;
2535 }
2536
2537 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2538 if (map) {
2539 if (pos < map->alloc_len)
2540 return map;
2541
2542 alloc_len = map->alloc_len * 2;
2543 }
2544
2545 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2546 * map
2547 */
2548 if (is_rxqs_map)
2549 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2550 else
2551 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2552 cpu_to_node(attr_index));
2553 if (!new_map)
2554 return NULL;
2555
2556 for (i = 0; i < pos; i++)
2557 new_map->queues[i] = map->queues[i];
2558 new_map->alloc_len = alloc_len;
2559 new_map->len = pos;
2560
2561 return new_map;
2562 }
2563
2564 /* Copy xps maps at a given index */
2565 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2566 struct xps_dev_maps *new_dev_maps, int index,
2567 int tc, bool skip_tc)
2568 {
2569 int i, tci = index * dev_maps->num_tc;
2570 struct xps_map *map;
2571
2572 /* copy maps belonging to foreign traffic classes */
2573 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2574 if (i == tc && skip_tc)
2575 continue;
2576
2577 /* fill in the new device map from the old device map */
2578 map = xmap_dereference(dev_maps->attr_map[tci]);
2579 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2580 }
2581 }
2582
2583 /* Must be called under cpus_read_lock */
2584 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2585 u16 index, enum xps_map_type type)
2586 {
2587 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2588 const unsigned long *online_mask = NULL;
2589 bool active = false, copy = false;
2590 int i, j, tci, numa_node_id = -2;
2591 int maps_sz, num_tc = 1, tc = 0;
2592 struct xps_map *map, *new_map;
2593 unsigned int nr_ids;
2594
2595 WARN_ON_ONCE(index >= dev->num_tx_queues);
2596
2597 if (dev->num_tc) {
2598 /* Do not allow XPS on subordinate device directly */
2599 num_tc = dev->num_tc;
2600 if (num_tc < 0)
2601 return -EINVAL;
2602
2603 /* If queue belongs to subordinate dev use its map */
2604 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2605
2606 tc = netdev_txq_to_tc(dev, index);
2607 if (tc < 0)
2608 return -EINVAL;
2609 }
2610
2611 mutex_lock(&xps_map_mutex);
2612
2613 dev_maps = xmap_dereference(dev->xps_maps[type]);
2614 if (type == XPS_RXQS) {
2615 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2616 nr_ids = dev->num_rx_queues;
2617 } else {
2618 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2619 if (num_possible_cpus() > 1)
2620 online_mask = cpumask_bits(cpu_online_mask);
2621 nr_ids = nr_cpu_ids;
2622 }
2623
2624 if (maps_sz < L1_CACHE_BYTES)
2625 maps_sz = L1_CACHE_BYTES;
2626
2627 /* The old dev_maps could be larger or smaller than the one we're
2628 * setting up now, as dev->num_tc or nr_ids could have been updated in
2629 * between. We could try to be smart, but let's be safe instead and only
2630 * copy foreign traffic classes if the two map sizes match.
2631 */
2632 if (dev_maps &&
2633 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2634 copy = true;
2635
2636 /* allocate memory for queue storage */
2637 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2638 j < nr_ids;) {
2639 if (!new_dev_maps) {
2640 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2641 if (!new_dev_maps) {
2642 mutex_unlock(&xps_map_mutex);
2643 return -ENOMEM;
2644 }
2645
2646 new_dev_maps->nr_ids = nr_ids;
2647 new_dev_maps->num_tc = num_tc;
2648 }
2649
2650 tci = j * num_tc + tc;
2651 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2652
2653 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2654 if (!map)
2655 goto error;
2656
2657 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2658 }
2659
2660 if (!new_dev_maps)
2661 goto out_no_new_maps;
2662
2663 if (!dev_maps) {
2664 /* Increment static keys at most once per type */
2665 static_key_slow_inc_cpuslocked(&xps_needed);
2666 if (type == XPS_RXQS)
2667 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2668 }
2669
2670 for (j = 0; j < nr_ids; j++) {
2671 bool skip_tc = false;
2672
2673 tci = j * num_tc + tc;
2674 if (netif_attr_test_mask(j, mask, nr_ids) &&
2675 netif_attr_test_online(j, online_mask, nr_ids)) {
2676 /* add tx-queue to CPU/rx-queue maps */
2677 int pos = 0;
2678
2679 skip_tc = true;
2680
2681 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2682 while ((pos < map->len) && (map->queues[pos] != index))
2683 pos++;
2684
2685 if (pos == map->len)
2686 map->queues[map->len++] = index;
2687 #ifdef CONFIG_NUMA
2688 if (type == XPS_CPUS) {
2689 if (numa_node_id == -2)
2690 numa_node_id = cpu_to_node(j);
2691 else if (numa_node_id != cpu_to_node(j))
2692 numa_node_id = -1;
2693 }
2694 #endif
2695 }
2696
2697 if (copy)
2698 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2699 skip_tc);
2700 }
2701
2702 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2703
2704 /* Cleanup old maps */
2705 if (!dev_maps)
2706 goto out_no_old_maps;
2707
2708 for (j = 0; j < dev_maps->nr_ids; j++) {
2709 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2710 map = xmap_dereference(dev_maps->attr_map[tci]);
2711 if (!map)
2712 continue;
2713
2714 if (copy) {
2715 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2716 if (map == new_map)
2717 continue;
2718 }
2719
2720 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2721 kfree_rcu(map, rcu);
2722 }
2723 }
2724
2725 old_dev_maps = dev_maps;
2726
2727 out_no_old_maps:
2728 dev_maps = new_dev_maps;
2729 active = true;
2730
2731 out_no_new_maps:
2732 if (type == XPS_CPUS)
2733 /* update Tx queue numa node */
2734 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2735 (numa_node_id >= 0) ?
2736 numa_node_id : NUMA_NO_NODE);
2737
2738 if (!dev_maps)
2739 goto out_no_maps;
2740
2741 /* removes tx-queue from unused CPUs/rx-queues */
2742 for (j = 0; j < dev_maps->nr_ids; j++) {
2743 tci = j * dev_maps->num_tc;
2744
2745 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2746 if (i == tc &&
2747 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2748 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2749 continue;
2750
2751 active |= remove_xps_queue(dev_maps,
2752 copy ? old_dev_maps : NULL,
2753 tci, index);
2754 }
2755 }
2756
2757 if (old_dev_maps)
2758 kfree_rcu(old_dev_maps, rcu);
2759
2760 /* free map if not active */
2761 if (!active)
2762 reset_xps_maps(dev, dev_maps, type);
2763
2764 out_no_maps:
2765 mutex_unlock(&xps_map_mutex);
2766
2767 return 0;
2768 error:
2769 /* remove any maps that we added */
2770 for (j = 0; j < nr_ids; j++) {
2771 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2772 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2773 map = copy ?
2774 xmap_dereference(dev_maps->attr_map[tci]) :
2775 NULL;
2776 if (new_map && new_map != map)
2777 kfree(new_map);
2778 }
2779 }
2780
2781 mutex_unlock(&xps_map_mutex);
2782
2783 kfree(new_dev_maps);
2784 return -ENOMEM;
2785 }
2786 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2787
2788 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2789 u16 index)
2790 {
2791 int ret;
2792
2793 cpus_read_lock();
2794 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2795 cpus_read_unlock();
2796
2797 return ret;
2798 }
2799 EXPORT_SYMBOL(netif_set_xps_queue);
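/*
 * Illustrative sketch (hypothetical driver setup): pinning each TX queue to
 * a single CPU via XPS; error handling is omitted and "i % num_online_cpus()"
 * is just one possible policy.
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		for (i = 0; i < dev->real_num_tx_queues; i++) {
 *			cpumask_clear(mask);
 *			cpumask_set_cpu(i % num_online_cpus(), mask);
 *			netif_set_xps_queue(dev, mask, i);
 *		}
 *		free_cpumask_var(mask);
 *	}
 */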
2800
2801 #endif
2802 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2803 {
2804 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2805
2806 /* Unbind any subordinate channels */
2807 while (txq-- != &dev->_tx[0]) {
2808 if (txq->sb_dev)
2809 netdev_unbind_sb_channel(dev, txq->sb_dev);
2810 }
2811 }
2812
2813 void netdev_reset_tc(struct net_device *dev)
2814 {
2815 #ifdef CONFIG_XPS
2816 netif_reset_xps_queues_gt(dev, 0);
2817 #endif
2818 netdev_unbind_all_sb_channels(dev);
2819
2820 /* Reset TC configuration of device */
2821 dev->num_tc = 0;
2822 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2823 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2824 }
2825 EXPORT_SYMBOL(netdev_reset_tc);
2826
2827 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2828 {
2829 if (tc >= dev->num_tc)
2830 return -EINVAL;
2831
2832 #ifdef CONFIG_XPS
2833 netif_reset_xps_queues(dev, offset, count);
2834 #endif
2835 dev->tc_to_txq[tc].count = count;
2836 dev->tc_to_txq[tc].offset = offset;
2837 return 0;
2838 }
2839 EXPORT_SYMBOL(netdev_set_tc_queue);
2840
2841 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2842 {
2843 if (num_tc > TC_MAX_QUEUE)
2844 return -EINVAL;
2845
2846 #ifdef CONFIG_XPS
2847 netif_reset_xps_queues_gt(dev, 0);
2848 #endif
2849 netdev_unbind_all_sb_channels(dev);
2850
2851 dev->num_tc = num_tc;
2852 return 0;
2853 }
2854 EXPORT_SYMBOL(netdev_set_num_tc);
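/*
 * Illustrative sketch (hypothetical mqprio-style configuration): two traffic
 * classes over eight queues, with priority 5 steered to TC1 and everything
 * else left on TC0.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1: queues 4-7
 *	netdev_set_prio_tc_map(dev, 5, 1);
 */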
2855
2856 void netdev_unbind_sb_channel(struct net_device *dev,
2857 struct net_device *sb_dev)
2858 {
2859 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2860
2861 #ifdef CONFIG_XPS
2862 netif_reset_xps_queues_gt(sb_dev, 0);
2863 #endif
2864 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2865 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2866
2867 while (txq-- != &dev->_tx[0]) {
2868 if (txq->sb_dev == sb_dev)
2869 txq->sb_dev = NULL;
2870 }
2871 }
2872 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2873
2874 int netdev_bind_sb_channel_queue(struct net_device *dev,
2875 struct net_device *sb_dev,
2876 u8 tc, u16 count, u16 offset)
2877 {
2878 /* Make certain the sb_dev and dev are already configured */
2879 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2880 return -EINVAL;
2881
2882 /* We cannot hand out queues we don't have */
2883 if ((offset + count) > dev->real_num_tx_queues)
2884 return -EINVAL;
2885
2886 /* Record the mapping */
2887 sb_dev->tc_to_txq[tc].count = count;
2888 sb_dev->tc_to_txq[tc].offset = offset;
2889
2890 /* Provide a way for Tx queue to find the tc_to_txq map or
2891 * XPS map for itself.
2892 */
2893 while (count--)
2894 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2895
2896 return 0;
2897 }
2898 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2899
2900 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2901 {
2902 /* Do not use a multiqueue device to represent a subordinate channel */
2903 if (netif_is_multiqueue(dev))
2904 return -ENODEV;
2905
2906 /* We allow channels 1 - 32767 to be used for subordinate channels.
2907 * Channel 0 is meant to be "native" mode and used only to represent
2908 * the main root device. We allow writing 0 to reset the device back
2909 * to normal mode after being used as a subordinate channel.
2910 */
2911 if (channel > S16_MAX)
2912 return -EINVAL;
2913
2914 dev->num_tc = -channel;
2915
2916 return 0;
2917 }
2918 EXPORT_SYMBOL(netdev_set_sb_channel);
2919
2920 /*
2921 * Routine to help set real_num_tx_queues. To avoid skbs being mapped to
2922 * queues greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2923 */
2924 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2925 {
2926 bool disabling;
2927 int rc;
2928
2929 disabling = txq < dev->real_num_tx_queues;
2930
2931 if (txq < 1 || txq > dev->num_tx_queues)
2932 return -EINVAL;
2933
2934 if (dev->reg_state == NETREG_REGISTERED ||
2935 dev->reg_state == NETREG_UNREGISTERING) {
2936 ASSERT_RTNL();
2937
2938 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2939 txq);
2940 if (rc)
2941 return rc;
2942
2943 if (dev->num_tc)
2944 netif_setup_tc(dev, txq);
2945
2946 dev_qdisc_change_real_num_tx(dev, txq);
2947
2948 dev->real_num_tx_queues = txq;
2949
2950 if (disabling) {
2951 synchronize_net();
2952 qdisc_reset_all_tx_gt(dev, txq);
2953 #ifdef CONFIG_XPS
2954 netif_reset_xps_queues_gt(dev, txq);
2955 #endif
2956 }
2957 } else {
2958 dev->real_num_tx_queues = txq;
2959 }
2960
2961 return 0;
2962 }
2963 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2964
2965 #ifdef CONFIG_SYSFS
2966 /**
2967 * netif_set_real_num_rx_queues - set actual number of RX queues used
2968 * @dev: Network device
2969 * @rxq: Actual number of RX queues
2970 *
2971 * This must be called either with the rtnl_lock held or before
2972 * registration of the net device. Returns 0 on success, or a
2973 * negative error code. If called before registration, it always
2974 * succeeds.
2975 */
2976 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2977 {
2978 int rc;
2979
2980 if (rxq < 1 || rxq > dev->num_rx_queues)
2981 return -EINVAL;
2982
2983 if (dev->reg_state == NETREG_REGISTERED) {
2984 ASSERT_RTNL();
2985
2986 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2987 rxq);
2988 if (rc)
2989 return rc;
2990 }
2991
2992 dev->real_num_rx_queues = rxq;
2993 return 0;
2994 }
2995 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2996 #endif
2997
2998 /**
2999 * netif_set_real_num_queues - set actual number of RX and TX queues used
3000 * @dev: Network device
3001 * @txq: Actual number of TX queues
3002 * @rxq: Actual number of RX queues
3003 *
3004 * Set the real number of both TX and RX queues.
3005 * Does nothing if the number of queues is already correct.
3006 */
3007 int netif_set_real_num_queues(struct net_device *dev,
3008 unsigned int txq, unsigned int rxq)
3009 {
3010 unsigned int old_rxq = dev->real_num_rx_queues;
3011 int err;
3012
3013 if (txq < 1 || txq > dev->num_tx_queues ||
3014 rxq < 1 || rxq > dev->num_rx_queues)
3015 return -EINVAL;
3016
3017 /* Start from increases, so the error path only does decreases -
3018 * decreases can't fail.
3019 */
3020 if (rxq > dev->real_num_rx_queues) {
3021 err = netif_set_real_num_rx_queues(dev, rxq);
3022 if (err)
3023 return err;
3024 }
3025 if (txq > dev->real_num_tx_queues) {
3026 err = netif_set_real_num_tx_queues(dev, txq);
3027 if (err)
3028 goto undo_rx;
3029 }
3030 if (rxq < dev->real_num_rx_queues)
3031 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3032 if (txq < dev->real_num_tx_queues)
3033 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3034
3035 return 0;
3036 undo_rx:
3037 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
3038 return err;
3039 }
3040 EXPORT_SYMBOL(netif_set_real_num_queues);
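/*
 * Illustrative sketch (hypothetical driver): after discovering how many
 * rings the hardware actually enabled, adjust the visible queue counts
 * under RTNL; hw_tx_rings/hw_rx_rings are made-up values.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_queues(dev, hw_tx_rings, hw_rx_rings);
 *	rtnl_unlock();
 */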
3041
3042 /**
3043 * netif_set_tso_max_size() - set the max size of TSO frames supported
3044 * @dev: netdev to update
3045 * @size: max skb->len of a TSO frame
3046 *
3047 * Set the limit on the size of TSO super-frames the device can handle.
3048 * Unless explicitly set the stack will assume the value of
3049 * %GSO_LEGACY_MAX_SIZE.
3050 */
3051 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3052 {
3053 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3054 if (size < READ_ONCE(dev->gso_max_size))
3055 netif_set_gso_max_size(dev, size);
3056 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3057 netif_set_gso_ipv4_max_size(dev, size);
3058 }
3059 EXPORT_SYMBOL(netif_set_tso_max_size);
3060
3061 /**
3062 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3063 * @dev: netdev to update
3064 * @segs: max number of TCP segments
3065 *
3066 * Set the limit on the number of TCP segments the device can generate from
3067 * a single TSO super-frame.
3068 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3069 */
3070 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3071 {
3072 dev->tso_max_segs = segs;
3073 if (segs < READ_ONCE(dev->gso_max_segs))
3074 netif_set_gso_max_segs(dev, segs);
3075 }
3076 EXPORT_SYMBOL(netif_set_tso_max_segs);
3077
3078 /**
3079 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3080 * @to: netdev to update
3081 * @from: netdev from which to copy the limits
3082 */
3083 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3084 {
3085 netif_set_tso_max_size(to, from->tso_max_size);
3086 netif_set_tso_max_segs(to, from->tso_max_segs);
3087 }
3088 EXPORT_SYMBOL(netif_inherit_tso_max);
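/*
 * Illustrative sketch: stacked devices (e.g. a bond or team enslaving a
 * port) commonly copy the lower device's TSO limits so the upper device
 * never builds super-frames the port cannot handle:
 *
 *	netif_inherit_tso_max(upper_dev, lower_dev);
 */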
3089
3090 /**
3091 * netif_get_num_default_rss_queues - default number of RSS queues
3092 *
3093 * Default value is the number of physical cores if there are only 1 or 2,
3094 * or half that number if there are more.
3095 */
3096 int netif_get_num_default_rss_queues(void)
3097 {
3098 cpumask_var_t cpus;
3099 int cpu, count = 0;
3100
3101 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3102 return 1;
3103
3104 cpumask_copy(cpus, cpu_online_mask);
3105 for_each_cpu(cpu, cpus) {
3106 ++count;
3107 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3108 }
3109 free_cpumask_var(cpus);
3110
3111 return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3112 }
3113 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
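/*
 * Illustrative sketch (hypothetical probe path): cap the RX queue count at
 * both the hardware limit and the RSS default; hw_max_queues is a made-up
 * per-device value.
 *
 *	nr_rx = min_t(int, hw_max_queues, netif_get_num_default_rss_queues());
 */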
3114
3115 static void __netif_reschedule(struct Qdisc *q)
3116 {
3117 struct softnet_data *sd;
3118 unsigned long flags;
3119
3120 local_irq_save(flags);
3121 sd = this_cpu_ptr(&softnet_data);
3122 q->next_sched = NULL;
3123 *sd->output_queue_tailp = q;
3124 sd->output_queue_tailp = &q->next_sched;
3125 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3126 local_irq_restore(flags);
3127 }
3128
3129 void __netif_schedule(struct Qdisc *q)
3130 {
3131 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3132 __netif_reschedule(q);
3133 }
3134 EXPORT_SYMBOL(__netif_schedule);
3135
3136 struct dev_kfree_skb_cb {
3137 enum skb_drop_reason reason;
3138 };
3139
3140 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3141 {
3142 return (struct dev_kfree_skb_cb *)skb->cb;
3143 }
3144
3145 void netif_schedule_queue(struct netdev_queue *txq)
3146 {
3147 rcu_read_lock();
3148 if (!netif_xmit_stopped(txq)) {
3149 struct Qdisc *q = rcu_dereference(txq->qdisc);
3150
3151 __netif_schedule(q);
3152 }
3153 rcu_read_unlock();
3154 }
3155 EXPORT_SYMBOL(netif_schedule_queue);
3156
3157 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3158 {
3159 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3160 struct Qdisc *q;
3161
3162 rcu_read_lock();
3163 q = rcu_dereference(dev_queue->qdisc);
3164 __netif_schedule(q);
3165 rcu_read_unlock();
3166 }
3167 }
3168 EXPORT_SYMBOL(netif_tx_wake_queue);
3169
3170 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3171 {
3172 unsigned long flags;
3173
3174 if (unlikely(!skb))
3175 return;
3176
3177 if (likely(refcount_read(&skb->users) == 1)) {
3178 smp_rmb();
3179 refcount_set(&skb->users, 0);
3180 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3181 return;
3182 }
3183 get_kfree_skb_cb(skb)->reason = reason;
3184 local_irq_save(flags);
3185 skb->next = __this_cpu_read(softnet_data.completion_queue);
3186 __this_cpu_write(softnet_data.completion_queue, skb);
3187 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3188 local_irq_restore(flags);
3189 }
3190 EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3191
3192 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3193 {
3194 if (in_hardirq() || irqs_disabled())
3195 dev_kfree_skb_irq_reason(skb, reason);
3196 else
3197 kfree_skb_reason(skb, reason);
3198 }
3199 EXPORT_SYMBOL(dev_kfree_skb_any_reason);
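/*
 * Illustrative sketch (hypothetical TX path): code that may run in hard IRQ
 * or process context can free an skb it could not transmit without caring
 * which context it is in:
 *
 *	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
 */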
3200
3201
3202 /**
3203 * netif_device_detach - mark device as removed
3204 * @dev: network device
3205 *
3206 * Mark device as removed from system and therefore no longer available.
3207 */
3208 void netif_device_detach(struct net_device *dev)
3209 {
3210 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3211 netif_running(dev)) {
3212 netif_tx_stop_all_queues(dev);
3213 }
3214 }
3215 EXPORT_SYMBOL(netif_device_detach);
3216
3217 /**
3218 * netif_device_attach - mark device as attached
3219 * @dev: network device
3220 *
3221 * Mark device as attached to the system and restart if needed.
3222 */
3223 void netif_device_attach(struct net_device *dev)
3224 {
3225 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3226 netif_running(dev)) {
3227 netif_tx_wake_all_queues(dev);
3228 __netdev_watchdog_up(dev);
3229 }
3230 }
3231 EXPORT_SYMBOL(netif_device_attach);
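/*
 * Illustrative sketch (hypothetical PM callbacks): the usual suspend/resume
 * pairing around these helpers; foo_hw_suspend()/foo_hw_resume() are made-up
 * device-specific routines.
 *
 *	static int foo_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		foo_hw_suspend(dev);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		foo_hw_resume(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */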
3232
3233 /*
3234 * Returns a Tx hash based on the given packet descriptor and the number of
3235 * Tx queues to be used as a distribution range.
3236 */
3237 static u16 skb_tx_hash(const struct net_device *dev,
3238 const struct net_device *sb_dev,
3239 struct sk_buff *skb)
3240 {
3241 u32 hash;
3242 u16 qoffset = 0;
3243 u16 qcount = dev->real_num_tx_queues;
3244
3245 if (dev->num_tc) {
3246 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3247
3248 qoffset = sb_dev->tc_to_txq[tc].offset;
3249 qcount = sb_dev->tc_to_txq[tc].count;
3250 if (unlikely(!qcount)) {
3251 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3252 sb_dev->name, qoffset, tc);
3253 qoffset = 0;
3254 qcount = dev->real_num_tx_queues;
3255 }
3256 }
3257
3258 if (skb_rx_queue_recorded(skb)) {
3259 DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3260 hash = skb_get_rx_queue(skb);
3261 if (hash >= qoffset)
3262 hash -= qoffset;
3263 while (unlikely(hash >= qcount))
3264 hash -= qcount;
3265 return hash + qoffset;
3266 }
3267
3268 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3269 }
3270
3271 void skb_warn_bad_offload(const struct sk_buff *skb)
3272 {
3273 static const netdev_features_t null_features;
3274 struct net_device *dev = skb->dev;
3275 const char *name = "";
3276
3277 if (!net_ratelimit())
3278 return;
3279
3280 if (dev) {
3281 if (dev->dev.parent)
3282 name = dev_driver_string(dev->dev.parent);
3283 else
3284 name = netdev_name(dev);
3285 }
3286 skb_dump(KERN_WARNING, skb, false);
3287 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3288 name, dev ? &dev->features : &null_features,
3289 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3290 }
3291
3292 /*
3293 * Invalidate hardware checksum when packet is to be mangled, and
3294 * complete checksum manually on outgoing path.
3295 */
3296 int skb_checksum_help(struct sk_buff *skb)
3297 {
3298 __wsum csum;
3299 int ret = 0, offset;
3300
3301 if (skb->ip_summed == CHECKSUM_COMPLETE)
3302 goto out_set_summed;
3303
3304 if (unlikely(skb_is_gso(skb))) {
3305 skb_warn_bad_offload(skb);
3306 return -EINVAL;
3307 }
3308
3309 /* Before computing a checksum, we should make sure no frag could
3310 * be modified by an external entity: checksum could be wrong.
3311 */
3312 if (skb_has_shared_frag(skb)) {
3313 ret = __skb_linearize(skb);
3314 if (ret)
3315 goto out;
3316 }
3317
3318 offset = skb_checksum_start_offset(skb);
3319 ret = -EINVAL;
3320 if (unlikely(offset >= skb_headlen(skb))) {
3321 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3322 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3323 offset, skb_headlen(skb));
3324 goto out;
3325 }
3326 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3327
3328 offset += skb->csum_offset;
3329 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3330 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3331 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3332 offset + sizeof(__sum16), skb_headlen(skb));
3333 goto out;
3334 }
3335 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3336 if (ret)
3337 goto out;
3338
3339 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3340 out_set_summed:
3341 skb->ip_summed = CHECKSUM_NONE;
3342 out:
3343 return ret;
3344 }
3345 EXPORT_SYMBOL(skb_checksum_help);
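/*
 * Illustrative sketch (hypothetical xmit path): a driver whose hardware
 * cannot checksum a particular packet can fall back to software before
 * handing the frame to the NIC; foo_hw_can_csum() is a made-up check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !foo_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */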
3346
3347 int skb_crc32c_csum_help(struct sk_buff *skb)
3348 {
3349 __le32 crc32c_csum;
3350 int ret = 0, offset, start;
3351
3352 if (skb->ip_summed != CHECKSUM_PARTIAL)
3353 goto out;
3354
3355 if (unlikely(skb_is_gso(skb)))
3356 goto out;
3357
3358 /* Before computing a checksum, we should make sure no frag could
3359 * be modified by an external entity: checksum could be wrong.
3360 */
3361 if (unlikely(skb_has_shared_frag(skb))) {
3362 ret = __skb_linearize(skb);
3363 if (ret)
3364 goto out;
3365 }
3366 start = skb_checksum_start_offset(skb);
3367 offset = start + offsetof(struct sctphdr, checksum);
3368 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3369 ret = -EINVAL;
3370 goto out;
3371 }
3372
3373 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3374 if (ret)
3375 goto out;
3376
3377 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3378 skb->len - start, ~(__u32)0,
3379 crc32c_csum_stub));
3380 *(__le32 *)(skb->data + offset) = crc32c_csum;
3381 skb_reset_csum_not_inet(skb);
3382 out:
3383 return ret;
3384 }
3385
3386 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3387 {
3388 __be16 type = skb->protocol;
3389
3390 /* Tunnel gso handlers can set protocol to ethernet. */
3391 if (type == htons(ETH_P_TEB)) {
3392 struct ethhdr *eth;
3393
3394 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3395 return 0;
3396
3397 eth = (struct ethhdr *)skb->data;
3398 type = eth->h_proto;
3399 }
3400
3401 return vlan_get_protocol_and_depth(skb, type, depth);
3402 }
3403
3404
3405 /* Take action when hardware reception checksum errors are detected. */
3406 #ifdef CONFIG_BUG
3407 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3408 {
3409 netdev_err(dev, "hw csum failure\n");
3410 skb_dump(KERN_ERR, skb, true);
3411 dump_stack();
3412 }
3413
3414 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3415 {
3416 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3417 }
3418 EXPORT_SYMBOL(netdev_rx_csum_fault);
3419 #endif
3420
3421 /* XXX: check that highmem exists at all on the given machine. */
3422 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3423 {
3424 #ifdef CONFIG_HIGHMEM
3425 int i;
3426
3427 if (!(dev->features & NETIF_F_HIGHDMA)) {
3428 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3429 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3430
3431 if (PageHighMem(skb_frag_page(frag)))
3432 return 1;
3433 }
3434 }
3435 #endif
3436 return 0;
3437 }
3438
3439 /* If MPLS offload request, verify we are testing hardware MPLS features
3440 * instead of standard features for the netdev.
3441 */
3442 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3443 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3444 netdev_features_t features,
3445 __be16 type)
3446 {
3447 if (eth_p_mpls(type))
3448 features &= skb->dev->mpls_features;
3449
3450 return features;
3451 }
3452 #else
3453 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3454 netdev_features_t features,
3455 __be16 type)
3456 {
3457 return features;
3458 }
3459 #endif
3460
3461 static netdev_features_t harmonize_features(struct sk_buff *skb,
3462 netdev_features_t features)
3463 {
3464 __be16 type;
3465
3466 type = skb_network_protocol(skb, NULL);
3467 features = net_mpls_features(skb, features, type);
3468
3469 if (skb->ip_summed != CHECKSUM_NONE &&
3470 !can_checksum_protocol(features, type)) {
3471 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3472 }
3473 if (illegal_highdma(skb->dev, skb))
3474 features &= ~NETIF_F_SG;
3475
3476 return features;
3477 }
3478
3479 netdev_features_t passthru_features_check(struct sk_buff *skb,
3480 struct net_device *dev,
3481 netdev_features_t features)
3482 {
3483 return features;
3484 }
3485 EXPORT_SYMBOL(passthru_features_check);
3486
3487 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3488 struct net_device *dev,
3489 netdev_features_t features)
3490 {
3491 return vlan_features_check(skb, features);
3492 }
3493
3494 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3495 struct net_device *dev,
3496 netdev_features_t features)
3497 {
3498 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3499
3500 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3501 return features & ~NETIF_F_GSO_MASK;
3502
3503 if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
3504 return features & ~NETIF_F_GSO_MASK;
3505
3506 if (!skb_shinfo(skb)->gso_type) {
3507 skb_warn_bad_offload(skb);
3508 return features & ~NETIF_F_GSO_MASK;
3509 }
3510
3511 /* Support for GSO partial features requires software
3512 * intervention before we can actually process the packets
3513 * so we need to strip support for any partial features now
3514 * and we can pull them back in after we have partially
3515 * segmented the frame.
3516 */
3517 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3518 features &= ~dev->gso_partial_features;
3519
3520 /* Make sure to clear the IPv4 ID mangling feature if the
3521 * IPv4 header has the potential to be fragmented.
3522 */
3523 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3524 struct iphdr *iph = skb->encapsulation ?
3525 inner_ip_hdr(skb) : ip_hdr(skb);
3526
3527 if (!(iph->frag_off & htons(IP_DF)))
3528 features &= ~NETIF_F_TSO_MANGLEID;
3529 }
3530
3531 return features;
3532 }
3533
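/* Compute the effective feature set for a single skb: start from
 * dev->features, restrict it for GSO limits and encapsulation, intersect with
 * the VLAN feature set when a tag is present, give the driver's
 * ndo_features_check() a veto, and finally run harmonize_features().
 */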
3534 netdev_features_t netif_skb_features(struct sk_buff *skb)
3535 {
3536 struct net_device *dev = skb->dev;
3537 netdev_features_t features = dev->features;
3538
3539 if (skb_is_gso(skb))
3540 features = gso_features_check(skb, dev, features);
3541
3542 /* If encapsulation offload request, verify we are testing
3543 * hardware encapsulation features instead of standard
3544 * features for the netdev
3545 */
3546 if (skb->encapsulation)
3547 features &= dev->hw_enc_features;
3548
3549 if (skb_vlan_tagged(skb))
3550 features = netdev_intersect_features(features,
3551 dev->vlan_features |
3552 NETIF_F_HW_VLAN_CTAG_TX |
3553 NETIF_F_HW_VLAN_STAG_TX);
3554
3555 if (dev->netdev_ops->ndo_features_check)
3556 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3557 features);
3558 else
3559 features &= dflt_features_check(skb, dev, features);
3560
3561 return harmonize_features(skb, features);
3562 }
3563 EXPORT_SYMBOL(netif_skb_features);
3564
3565 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3566 struct netdev_queue *txq, bool more)
3567 {
3568 unsigned int len;
3569 int rc;
3570
3571 if (dev_nit_active(dev))
3572 dev_queue_xmit_nit(skb, dev);
3573
3574 len = skb->len;
3575 trace_net_dev_start_xmit(skb, dev);
3576 rc = netdev_start_xmit(skb, dev, txq, more);
3577 trace_net_dev_xmit(skb, rc, dev, len);
3578
3579 return rc;
3580 }
3581
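/* Transmit a list of skbs one element at a time. On a hard failure, or when
 * the tx queue stops mid-list, the unsent remainder is returned to the caller
 * and *ret carries the last driver return code.
 */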
3582 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3583 struct netdev_queue *txq, int *ret)
3584 {
3585 struct sk_buff *skb = first;
3586 int rc = NETDEV_TX_OK;
3587
3588 while (skb) {
3589 struct sk_buff *next = skb->next;
3590
3591 skb_mark_not_on_list(skb);
3592 rc = xmit_one(skb, dev, txq, next != NULL);
3593 if (unlikely(!dev_xmit_complete(rc))) {
3594 skb->next = next;
3595 goto out;
3596 }
3597
3598 skb = next;
3599 if (netif_tx_queue_stopped(txq) && skb) {
3600 rc = NETDEV_TX_BUSY;
3601 break;
3602 }
3603 }
3604
3605 out:
3606 *ret = rc;
3607 return skb;
3608 }
3609
3610 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3611 netdev_features_t features)
3612 {
3613 if (skb_vlan_tag_present(skb) &&
3614 !vlan_hw_offload_capable(features, skb->vlan_proto))
3615 skb = __vlan_hwaccel_push_inside(skb);
3616 return skb;
3617 }
3618
3619 int skb_csum_hwoffload_help(struct sk_buff *skb,
3620 const netdev_features_t features)
3621 {
3622 if (unlikely(skb_csum_is_sctp(skb)))
3623 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3624 skb_crc32c_csum_help(skb);
3625
3626 if (features & NETIF_F_HW_CSUM)
3627 return 0;
3628
3629 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3630 switch (skb->csum_offset) {
3631 case offsetof(struct tcphdr, check):
3632 case offsetof(struct udphdr, check):
3633 return 0;
3634 }
3635 }
3636
3637 return skb_checksum_help(skb);
3638 }
3639 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3640
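/* Last-chance fixups before handing an skb to the driver: push the VLAN tag
 * back into the payload if the device cannot offload it, software-segment GSO
 * skbs the device cannot handle, linearize if required, and resolve
 * CHECKSUM_PARTIAL in software when no suitable checksum offload exists.
 */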
3641 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3642 {
3643 netdev_features_t features;
3644
3645 features = netif_skb_features(skb);
3646 skb = validate_xmit_vlan(skb, features);
3647 if (unlikely(!skb))
3648 goto out_null;
3649
3650 skb = sk_validate_xmit_skb(skb, dev);
3651 if (unlikely(!skb))
3652 goto out_null;
3653
3654 if (netif_needs_gso(skb, features)) {
3655 struct sk_buff *segs;
3656
3657 segs = skb_gso_segment(skb, features);
3658 if (IS_ERR(segs)) {
3659 goto out_kfree_skb;
3660 } else if (segs) {
3661 consume_skb(skb);
3662 skb = segs;
3663 }
3664 } else {
3665 if (skb_needs_linearize(skb, features) &&
3666 __skb_linearize(skb))
3667 goto out_kfree_skb;
3668
3669 /* If packet is not checksummed and device does not
3670 * support checksumming for this protocol, complete
3671 * checksumming here.
3672 */
3673 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3674 if (skb->encapsulation)
3675 skb_set_inner_transport_header(skb,
3676 skb_checksum_start_offset(skb));
3677 else
3678 skb_set_transport_header(skb,
3679 skb_checksum_start_offset(skb));
3680 if (skb_csum_hwoffload_help(skb, features))
3681 goto out_kfree_skb;
3682 }
3683 }
3684
3685 skb = validate_xmit_xfrm(skb, features, again);
3686
3687 return skb;
3688
3689 out_kfree_skb:
3690 kfree_skb(skb);
3691 out_null:
3692 dev_core_stats_tx_dropped_inc(dev);
3693 return NULL;
3694 }
3695
3696 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3697 {
3698 struct sk_buff *next, *head = NULL, *tail;
3699
3700 for (; skb != NULL; skb = next) {
3701 next = skb->next;
3702 skb_mark_not_on_list(skb);
3703
3704 /* in case skb won't be segmented, point to itself */
3705 skb->prev = skb;
3706
3707 skb = validate_xmit_skb(skb, dev, again);
3708 if (!skb)
3709 continue;
3710
3711 if (!head)
3712 head = skb;
3713 else
3714 tail->next = skb;
3715 /* If skb was segmented, skb->prev points to
3716 * the last segment. If not, it still contains skb.
3717 */
3718 tail = skb->prev;
3719 }
3720 return head;
3721 }
3722 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3723
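/* Seed qdisc_skb_cb(skb)->pkt_len with a wire-level byte estimate. For GSO
 * skbs every extra segment repeats the headers, so (gso_segs - 1) * hdr_len
 * is added; e.g. (illustrative numbers only) a TSO skb split into 45 segments
 * with 66 bytes of headers is accounted 44 * 66 = 2904 bytes heavier than
 * skb->len.
 */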
3724 static void qdisc_pkt_len_init(struct sk_buff *skb)
3725 {
3726 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3727
3728 qdisc_skb_cb(skb)->pkt_len = skb->len;
3729
3730 /* To get more precise estimation of bytes sent on wire,
3731 * we add to pkt_len the headers size of all segments
3732 */
3733 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3734 u16 gso_segs = shinfo->gso_segs;
3735 unsigned int hdr_len;
3736
3737 /* mac layer + network layer */
3738 hdr_len = skb_transport_offset(skb);
3739
3740 /* + transport layer */
3741 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3742 const struct tcphdr *th;
3743 struct tcphdr _tcphdr;
3744
3745 th = skb_header_pointer(skb, hdr_len,
3746 sizeof(_tcphdr), &_tcphdr);
3747 if (likely(th))
3748 hdr_len += __tcp_hdrlen(th);
3749 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
3750 struct udphdr _udphdr;
3751
3752 if (skb_header_pointer(skb, hdr_len,
3753 sizeof(_udphdr), &_udphdr))
3754 hdr_len += sizeof(struct udphdr);
3755 }
3756
3757 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
3758 int payload = skb->len - hdr_len;
3759
3760 /* Malicious packet. */
3761 if (payload <= 0)
3762 return;
3763 gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
3764 }
3765 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3766 }
3767 }
3768
3769 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3770 struct sk_buff **to_free,
3771 struct netdev_queue *txq)
3772 {
3773 int rc;
3774
3775 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3776 if (rc == NET_XMIT_SUCCESS)
3777 trace_qdisc_enqueue(q, txq, skb);
3778 return rc;
3779 }
3780
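/* Enqueue an skb to a qdisc and kick transmission. Lockless (TCQ_F_NOLOCK)
 * qdiscs may bypass the enqueue entirely when empty; locked qdiscs take the
 * root lock, optionally serializing contended CPUs on q->busylock first, and
 * may transmit directly when the queue is empty and TCQ_F_CAN_BYPASS is set.
 */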
3781 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3782 struct net_device *dev,
3783 struct netdev_queue *txq)
3784 {
3785 spinlock_t *root_lock = qdisc_lock(q);
3786 struct sk_buff *to_free = NULL;
3787 bool contended;
3788 int rc;
3789
3790 qdisc_calculate_pkt_len(skb, q);
3791
3792 if (q->flags & TCQ_F_NOLOCK) {
3793 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3794 qdisc_run_begin(q)) {
3795 /* Retest nolock_qdisc_is_empty() within the protection
3796 * of q->seqlock to protect from racing with requeuing.
3797 */
3798 if (unlikely(!nolock_qdisc_is_empty(q))) {
3799 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3800 __qdisc_run(q);
3801 qdisc_run_end(q);
3802
3803 goto no_lock_out;
3804 }
3805
3806 qdisc_bstats_cpu_update(q, skb);
3807 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3808 !nolock_qdisc_is_empty(q))
3809 __qdisc_run(q);
3810
3811 qdisc_run_end(q);
3812 return NET_XMIT_SUCCESS;
3813 }
3814
3815 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3816 qdisc_run(q);
3817
3818 no_lock_out:
3819 if (unlikely(to_free))
3820 kfree_skb_list_reason(to_free,
3821 SKB_DROP_REASON_QDISC_DROP);
3822 return rc;
3823 }
3824
3825 /*
3826 * Heuristic to force contended enqueues to serialize on a
3827 * separate lock before trying to get qdisc main lock.
3828 * This permits qdisc->running owner to get the lock more
3829 * often and dequeue packets faster.
3830 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3831 * and then other tasks will only enqueue packets. The packets will be
3832 * sent after the qdisc owner is scheduled again. To prevent this
3833 * scenario the task always serializes on the lock.
3834 */
3835 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3836 if (unlikely(contended))
3837 spin_lock(&q->busylock);
3838
3839 spin_lock(root_lock);
3840 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3841 __qdisc_drop(skb, &to_free);
3842 rc = NET_XMIT_DROP;
3843 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3844 qdisc_run_begin(q)) {
3845 /*
3846 * This is a work-conserving queue; there are no old skbs
3847 * waiting to be sent out; and the qdisc is not running -
3848 * xmit the skb directly.
3849 */
3850
3851 qdisc_bstats_update(q, skb);
3852
3853 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3854 if (unlikely(contended)) {
3855 spin_unlock(&q->busylock);
3856 contended = false;
3857 }
3858 __qdisc_run(q);
3859 }
3860
3861 qdisc_run_end(q);
3862 rc = NET_XMIT_SUCCESS;
3863 } else {
3864 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3865 if (qdisc_run_begin(q)) {
3866 if (unlikely(contended)) {
3867 spin_unlock(&q->busylock);
3868 contended = false;
3869 }
3870 __qdisc_run(q);
3871 qdisc_run_end(q);
3872 }
3873 }
3874 spin_unlock(root_lock);
3875 if (unlikely(to_free))
3876 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
3877 if (unlikely(contended))
3878 spin_unlock(&q->busylock);
3879 return rc;
3880 }
3881
3882 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3883 static void skb_update_prio(struct sk_buff *skb)
3884 {
3885 const struct netprio_map *map;
3886 const struct sock *sk;
3887 unsigned int prioidx;
3888
3889 if (skb->priority)
3890 return;
3891 map = rcu_dereference_bh(skb->dev->priomap);
3892 if (!map)
3893 return;
3894 sk = skb_to_full_sk(skb);
3895 if (!sk)
3896 return;
3897
3898 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3899
3900 if (prioidx < map->priomap_len)
3901 skb->priority = map->priomap[prioidx];
3902 }
3903 #else
3904 #define skb_update_prio(skb)
3905 #endif
3906
3907 /**
3908 * dev_loopback_xmit - loop back @skb
3909 * @net: network namespace this loopback is happening in
3910 * @sk: sk needed to be a netfilter okfn
3911 * @skb: buffer to transmit
3912 */
3913 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3914 {
3915 skb_reset_mac_header(skb);
3916 __skb_pull(skb, skb_network_offset(skb));
3917 skb->pkt_type = PACKET_LOOPBACK;
3918 if (skb->ip_summed == CHECKSUM_NONE)
3919 skb->ip_summed = CHECKSUM_UNNECESSARY;
3920 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3921 skb_dst_force(skb);
3922 netif_rx(skb);
3923 return 0;
3924 }
3925 EXPORT_SYMBOL(dev_loopback_xmit);
3926
3927 #ifdef CONFIG_NET_EGRESS
3928 static struct netdev_queue *
3929 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3930 {
3931 int qm = skb_get_queue_mapping(skb);
3932
3933 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3934 }
3935
3936 static bool netdev_xmit_txqueue_skipped(void)
3937 {
3938 return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3939 }
3940
3941 void netdev_xmit_skip_txqueue(bool skip)
3942 {
3943 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3944 }
3945 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3946 #endif /* CONFIG_NET_EGRESS */
3947
3948 #ifdef CONFIG_NET_XGRESS
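/* tc_run() classifies the skb against the mini_Qdisc filter list attached to
 * the tcx entry and translates the tcf verdict: TC_ACT_SHOT is counted as a
 * drop, TC_ACT_OK/RECLASSIFY update skb->tc_index, anything else is passed
 * back to the caller unchanged.
 */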
3949 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb)
3950 {
3951 int ret = TC_ACT_UNSPEC;
3952 #ifdef CONFIG_NET_CLS_ACT
3953 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3954 struct tcf_result res;
3955
3956 if (!miniq)
3957 return ret;
3958
3959 tc_skb_cb(skb)->mru = 0;
3960 tc_skb_cb(skb)->post_ct = false;
3961
3962 mini_qdisc_bstats_cpu_update(miniq, skb);
3963 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3964 /* Only tcf related quirks below. */
3965 switch (ret) {
3966 case TC_ACT_SHOT:
3967 mini_qdisc_qstats_cpu_drop(miniq);
3968 break;
3969 case TC_ACT_OK:
3970 case TC_ACT_RECLASSIFY:
3971 skb->tc_index = TC_H_MIN(res.classid);
3972 break;
3973 }
3974 #endif /* CONFIG_NET_CLS_ACT */
3975 return ret;
3976 }
3977
3978 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
3979
3980 void tcx_inc(void)
3981 {
3982 static_branch_inc(&tcx_needed_key);
3983 }
3984
3985 void tcx_dec(void)
3986 {
3987 static_branch_dec(&tcx_needed_key);
3988 }
3989
3990 static __always_inline enum tcx_action_base
3991 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
3992 const bool needs_mac)
3993 {
3994 const struct bpf_mprog_fp *fp;
3995 const struct bpf_prog *prog;
3996 int ret = TCX_NEXT;
3997
3998 if (needs_mac)
3999 __skb_push(skb, skb->mac_len);
4000 bpf_mprog_foreach_prog(entry, fp, prog) {
4001 bpf_compute_data_pointers(skb);
4002 ret = bpf_prog_run(prog, skb);
4003 if (ret != TCX_NEXT)
4004 break;
4005 }
4006 if (needs_mac)
4007 __skb_pull(skb, skb->mac_len);
4008 return tcx_action_code(skb, ret);
4009 }
4010
4011 static __always_inline struct sk_buff *
4012 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4013 struct net_device *orig_dev, bool *another)
4014 {
4015 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4016 int sch_ret;
4017
4018 if (!entry)
4019 return skb;
4020 if (*pt_prev) {
4021 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4022 *pt_prev = NULL;
4023 }
4024
4025 qdisc_skb_cb(skb)->pkt_len = skb->len;
4026 tcx_set_ingress(skb, true);
4027
4028 if (static_branch_unlikely(&tcx_needed_key)) {
4029 sch_ret = tcx_run(entry, skb, true);
4030 if (sch_ret != TC_ACT_UNSPEC)
4031 goto ingress_verdict;
4032 }
4033 sch_ret = tc_run(tcx_entry(entry), skb);
4034 ingress_verdict:
4035 switch (sch_ret) {
4036 case TC_ACT_REDIRECT:
4037 /* skb_mac_header check was done by BPF, so we can safely
4038 * push the L2 header back before redirecting to another
4039 * netdev.
4040 */
4041 __skb_push(skb, skb->mac_len);
4042 if (skb_do_redirect(skb) == -EAGAIN) {
4043 __skb_pull(skb, skb->mac_len);
4044 *another = true;
4045 break;
4046 }
4047 *ret = NET_RX_SUCCESS;
4048 return NULL;
4049 case TC_ACT_SHOT:
4050 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
4051 *ret = NET_RX_DROP;
4052 return NULL;
4053 /* used by tc_run */
4054 case TC_ACT_STOLEN:
4055 case TC_ACT_QUEUED:
4056 case TC_ACT_TRAP:
4057 consume_skb(skb);
4058 fallthrough;
4059 case TC_ACT_CONSUMED:
4060 *ret = NET_RX_SUCCESS;
4061 return NULL;
4062 }
4063
4064 return skb;
4065 }
4066
4067 static __always_inline struct sk_buff *
4068 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4069 {
4070 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4071 int sch_ret;
4072
4073 if (!entry)
4074 return skb;
4075
4076 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
4077 * already set by the caller.
4078 */
4079 if (static_branch_unlikely(&tcx_needed_key)) {
4080 sch_ret = tcx_run(entry, skb, false);
4081 if (sch_ret != TC_ACT_UNSPEC)
4082 goto egress_verdict;
4083 }
4084 sch_ret = tc_run(tcx_entry(entry), skb);
4085 egress_verdict:
4086 switch (sch_ret) {
4087 case TC_ACT_REDIRECT:
4088 /* No need to push/pop skb's mac_header here on egress! */
4089 skb_do_redirect(skb);
4090 *ret = NET_XMIT_SUCCESS;
4091 return NULL;
4092 case TC_ACT_SHOT:
4093 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
4094 *ret = NET_XMIT_DROP;
4095 return NULL;
4096 /* used by tc_run */
4097 case TC_ACT_STOLEN:
4098 case TC_ACT_QUEUED:
4099 case TC_ACT_TRAP:
4100 consume_skb(skb);
4101 fallthrough;
4102 case TC_ACT_CONSUMED:
4103 *ret = NET_XMIT_SUCCESS;
4104 return NULL;
4105 }
4106
4107 return skb;
4108 }
4109 #else
4110 static __always_inline struct sk_buff *
4111 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4112 struct net_device *orig_dev, bool *another)
4113 {
4114 return skb;
4115 }
4116
4117 static __always_inline struct sk_buff *
4118 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4119 {
4120 return skb;
4121 }
4122 #endif /* CONFIG_NET_XGRESS */
4123
4124 #ifdef CONFIG_XPS
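/* XPS queue lookup: the skb's priority selects a traffic class, the (tci, tc)
 * pair indexes the attribute map, and multi-queue maps are spread with
 * reciprocal_scale() over the flow hash. A return value of -1 means "no XPS
 * decision".
 */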
4125 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4126 struct xps_dev_maps *dev_maps, unsigned int tci)
4127 {
4128 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4129 struct xps_map *map;
4130 int queue_index = -1;
4131
4132 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4133 return queue_index;
4134
4135 tci *= dev_maps->num_tc;
4136 tci += tc;
4137
4138 map = rcu_dereference(dev_maps->attr_map[tci]);
4139 if (map) {
4140 if (map->len == 1)
4141 queue_index = map->queues[0];
4142 else
4143 queue_index = map->queues[reciprocal_scale(
4144 skb_get_hash(skb), map->len)];
4145 if (unlikely(queue_index >= dev->real_num_tx_queues))
4146 queue_index = -1;
4147 }
4148 return queue_index;
4149 }
4150 #endif
4151
4152 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4153 struct sk_buff *skb)
4154 {
4155 #ifdef CONFIG_XPS
4156 struct xps_dev_maps *dev_maps;
4157 struct sock *sk = skb->sk;
4158 int queue_index = -1;
4159
4160 if (!static_key_false(&xps_needed))
4161 return -1;
4162
4163 rcu_read_lock();
4164 if (!static_key_false(&xps_rxqs_needed))
4165 goto get_cpus_map;
4166
4167 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4168 if (dev_maps) {
4169 int tci = sk_rx_queue_get(sk);
4170
4171 if (tci >= 0)
4172 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4173 tci);
4174 }
4175
4176 get_cpus_map:
4177 if (queue_index < 0) {
4178 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4179 if (dev_maps) {
4180 unsigned int tci = skb->sender_cpu - 1;
4181
4182 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4183 tci);
4184 }
4185 }
4186 rcu_read_unlock();
4187
4188 return queue_index;
4189 #else
4190 return -1;
4191 #endif
4192 }
4193
4194 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4195 struct net_device *sb_dev)
4196 {
4197 return 0;
4198 }
4199 EXPORT_SYMBOL(dev_pick_tx_zero);
4200
4201 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4202 struct net_device *sb_dev)
4203 {
4204 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4205 }
4206 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4207
4208 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4209 struct net_device *sb_dev)
4210 {
4211 struct sock *sk = skb->sk;
4212 int queue_index = sk_tx_queue_get(sk);
4213
4214 sb_dev = sb_dev ? : dev;
4215
4216 if (queue_index < 0 || skb->ooo_okay ||
4217 queue_index >= dev->real_num_tx_queues) {
4218 int new_index = get_xps_queue(dev, sb_dev, skb);
4219
4220 if (new_index < 0)
4221 new_index = skb_tx_hash(dev, sb_dev, skb);
4222
4223 if (queue_index != new_index && sk &&
4224 sk_fullsock(sk) &&
4225 rcu_access_pointer(sk->sk_dst_cache))
4226 sk_tx_queue_set(sk, new_index);
4227
4228 queue_index = new_index;
4229 }
4230
4231 return queue_index;
4232 }
4233 EXPORT_SYMBOL(netdev_pick_tx);
4234
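/* Pick the tx queue for an skb: a driver-provided ndo_select_queue() wins,
 * otherwise fall back to netdev_pick_tx(); either result is clamped by
 * netdev_cap_txqueue() and recorded in the skb's queue mapping.
 */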
4235 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4236 struct sk_buff *skb,
4237 struct net_device *sb_dev)
4238 {
4239 int queue_index = 0;
4240
4241 #ifdef CONFIG_XPS
4242 u32 sender_cpu = skb->sender_cpu - 1;
4243
4244 if (sender_cpu >= (u32)NR_CPUS)
4245 skb->sender_cpu = raw_smp_processor_id() + 1;
4246 #endif
4247
4248 if (dev->real_num_tx_queues != 1) {
4249 const struct net_device_ops *ops = dev->netdev_ops;
4250
4251 if (ops->ndo_select_queue)
4252 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4253 else
4254 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4255
4256 queue_index = netdev_cap_txqueue(dev, queue_index);
4257 }
4258
4259 skb_set_queue_mapping(skb, queue_index);
4260 return netdev_get_tx_queue(dev, queue_index);
4261 }
4262
4263 /**
4264 * __dev_queue_xmit() - transmit a buffer
4265 * @skb: buffer to transmit
4266 * @sb_dev: subordinate device used for L2 forwarding offload
4267 *
4268 * Queue a buffer for transmission to a network device. The caller must
4269 * have set the device and priority and built the buffer before calling
4270 * this function. The function can be called from an interrupt.
4271 *
4272 * When calling this method, interrupts MUST be enabled. This is because
4273 * the BH enable code must have IRQs enabled so that it will not deadlock.
4274 *
4275 * Regardless of the return value, the skb is consumed, so it is currently
4276 * difficult to retry a send to this method. (You can bump the ref count
4277 * before sending to hold a reference for retry if you are careful.)
4278 *
4279 * Return:
4280 * * 0 - buffer successfully transmitted
4281 * * positive qdisc return code - NET_XMIT_DROP etc.
4282 * * negative errno - other errors
4283 */
4284 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4285 {
4286 struct net_device *dev = skb->dev;
4287 struct netdev_queue *txq = NULL;
4288 struct Qdisc *q;
4289 int rc = -ENOMEM;
4290 bool again = false;
4291
4292 skb_reset_mac_header(skb);
4293 skb_assert_len(skb);
4294
4295 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4296 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4297
4298 /* Disable soft irqs for various locks below. Also
4299 * stops preemption for RCU.
4300 */
4301 rcu_read_lock_bh();
4302
4303 skb_update_prio(skb);
4304
4305 qdisc_pkt_len_init(skb);
4306 tcx_set_ingress(skb, false);
4307 #ifdef CONFIG_NET_EGRESS
4308 if (static_branch_unlikely(&egress_needed_key)) {
4309 if (nf_hook_egress_active()) {
4310 skb = nf_hook_egress(skb, &rc, dev);
4311 if (!skb)
4312 goto out;
4313 }
4314
4315 netdev_xmit_skip_txqueue(false);
4316
4317 nf_skip_egress(skb, true);
4318 skb = sch_handle_egress(skb, &rc, dev);
4319 if (!skb)
4320 goto out;
4321 nf_skip_egress(skb, false);
4322
4323 if (netdev_xmit_txqueue_skipped())
4324 txq = netdev_tx_queue_mapping(dev, skb);
4325 }
4326 #endif
4327 /* If device/qdisc don't need skb->dst, release it right now while
4328 * it's hot in this cpu's cache.
4329 */
4330 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4331 skb_dst_drop(skb);
4332 else
4333 skb_dst_force(skb);
4334
4335 if (!txq)
4336 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4337
4338 q = rcu_dereference_bh(txq->qdisc);
4339
4340 trace_net_dev_queue(skb);
4341 if (q->enqueue) {
4342 rc = __dev_xmit_skb(skb, q, dev, txq);
4343 goto out;
4344 }
4345
4346 /* The device has no queue. Common case for software devices:
4347 * loopback, all the sorts of tunnels...
4348 *
4349 * Really, it is unlikely that netif_tx_lock protection is necessary
4350 * here. (e.g. loopback and IP tunnels are clean ignoring statistics
4351 * counters.)
4352 * However, it is possible that they rely on the protection
4353 * made by us here.
4354 *
4355 * Check this and take the lock. It is not prone to deadlocks.
4356 * Either way, take the noqueue path; it is even simpler 8)
4357 */
4358 if (dev->flags & IFF_UP) {
4359 int cpu = smp_processor_id(); /* ok because BHs are off */
4360
4361 /* Other cpus might concurrently change txq->xmit_lock_owner
4362 * to -1 or to their cpu id, but not to our id.
4363 */
4364 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4365 if (dev_xmit_recursion())
4366 goto recursion_alert;
4367
4368 skb = validate_xmit_skb(skb, dev, &again);
4369 if (!skb)
4370 goto out;
4371
4372 HARD_TX_LOCK(dev, txq, cpu);
4373
4374 if (!netif_xmit_stopped(txq)) {
4375 dev_xmit_recursion_inc();
4376 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4377 dev_xmit_recursion_dec();
4378 if (dev_xmit_complete(rc)) {
4379 HARD_TX_UNLOCK(dev, txq);
4380 goto out;
4381 }
4382 }
4383 HARD_TX_UNLOCK(dev, txq);
4384 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4385 dev->name);
4386 } else {
4387 /* Recursion is detected! It is possible,
4388 * unfortunately
4389 */
4390 recursion_alert:
4391 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4392 dev->name);
4393 }
4394 }
4395
4396 rc = -ENETDOWN;
4397 rcu_read_unlock_bh();
4398
4399 dev_core_stats_tx_dropped_inc(dev);
4400 kfree_skb_list(skb);
4401 return rc;
4402 out:
4403 rcu_read_unlock_bh();
4404 return rc;
4405 }
4406 EXPORT_SYMBOL(__dev_queue_xmit);
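/* Most callers reach this through dev_queue_xmit(skb), which simply passes a
 * NULL @sb_dev; only L2-forwarding offload paths supply a subordinate device.
 */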
4407
4408 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4409 {
4410 struct net_device *dev = skb->dev;
4411 struct sk_buff *orig_skb = skb;
4412 struct netdev_queue *txq;
4413 int ret = NETDEV_TX_BUSY;
4414 bool again = false;
4415
4416 if (unlikely(!netif_running(dev) ||
4417 !netif_carrier_ok(dev)))
4418 goto drop;
4419
4420 skb = validate_xmit_skb_list(skb, dev, &again);
4421 if (skb != orig_skb)
4422 goto drop;
4423
4424 skb_set_queue_mapping(skb, queue_id);
4425 txq = skb_get_tx_queue(dev, skb);
4426
4427 local_bh_disable();
4428
4429 dev_xmit_recursion_inc();
4430 HARD_TX_LOCK(dev, txq, smp_processor_id());
4431 if (!netif_xmit_frozen_or_drv_stopped(txq))
4432 ret = netdev_start_xmit(skb, dev, txq, false);
4433 HARD_TX_UNLOCK(dev, txq);
4434 dev_xmit_recursion_dec();
4435
4436 local_bh_enable();
4437 return ret;
4438 drop:
4439 dev_core_stats_tx_dropped_inc(dev);
4440 kfree_skb_list(skb);
4441 return NET_XMIT_DROP;
4442 }
4443 EXPORT_SYMBOL(__dev_direct_xmit);
4444
4445 /*************************************************************************
4446 * Receiver routines
4447 *************************************************************************/
4448
4449 int netdev_max_backlog __read_mostly = 1000;
4450 EXPORT_SYMBOL(netdev_max_backlog);
4451
4452 int netdev_tstamp_prequeue __read_mostly = 1;
4453 unsigned int sysctl_skb_defer_max __read_mostly = 64;
4454 int netdev_budget __read_mostly = 300;
4455 /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4456 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4457 int weight_p __read_mostly = 64; /* old backlog weight */
4458 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4459 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4460 int dev_rx_weight __read_mostly = 64;
4461 int dev_tx_weight __read_mostly = 64;
4462
4463 /* Called with irq disabled */
4464 static inline void ____napi_schedule(struct softnet_data *sd,
4465 struct napi_struct *napi)
4466 {
4467 struct task_struct *thread;
4468
4469 lockdep_assert_irqs_disabled();
4470
4471 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4472 /* Paired with smp_mb__before_atomic() in
4473 * napi_enable()/dev_set_threaded().
4474 * Use READ_ONCE() to guarantee a complete
4475 * read on napi->thread. Only call
4476 * wake_up_process() when it's not NULL.
4477 */
4478 thread = READ_ONCE(napi->thread);
4479 if (thread) {
4480 /* Avoid doing set_bit() if the thread is in
4481 * INTERRUPTIBLE state, because napi_thread_wait()
4482 * makes sure to proceed with napi polling
4483 * if the thread is explicitly woken from here.
4484 */
4485 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4486 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4487 wake_up_process(thread);
4488 return;
4489 }
4490 }
4491
4492 list_add_tail(&napi->poll_list, &sd->poll_list);
4493 WRITE_ONCE(napi->list_owner, smp_processor_id());
4494 /* If not called from net_rx_action()
4495 * we have to raise NET_RX_SOFTIRQ.
4496 */
4497 if (!sd->in_net_rx_action)
4498 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4499 }
4500
4501 #ifdef CONFIG_RPS
4502
4503 /* One global table that all flow-based protocols share. */
4504 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4505 EXPORT_SYMBOL(rps_sock_flow_table);
4506 u32 rps_cpu_mask __read_mostly;
4507 EXPORT_SYMBOL(rps_cpu_mask);
4508
4509 struct static_key_false rps_needed __read_mostly;
4510 EXPORT_SYMBOL(rps_needed);
4511 struct static_key_false rfs_needed __read_mostly;
4512 EXPORT_SYMBOL(rfs_needed);
4513
4514 static struct rps_dev_flow *
4515 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4516 struct rps_dev_flow *rflow, u16 next_cpu)
4517 {
4518 if (next_cpu < nr_cpu_ids) {
4519 #ifdef CONFIG_RFS_ACCEL
4520 struct netdev_rx_queue *rxqueue;
4521 struct rps_dev_flow_table *flow_table;
4522 struct rps_dev_flow *old_rflow;
4523 u32 flow_id;
4524 u16 rxq_index;
4525 int rc;
4526
4527 /* Should we steer this flow to a different hardware queue? */
4528 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4529 !(dev->features & NETIF_F_NTUPLE))
4530 goto out;
4531 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4532 if (rxq_index == skb_get_rx_queue(skb))
4533 goto out;
4534
4535 rxqueue = dev->_rx + rxq_index;
4536 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4537 if (!flow_table)
4538 goto out;
4539 flow_id = skb_get_hash(skb) & flow_table->mask;
4540 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4541 rxq_index, flow_id);
4542 if (rc < 0)
4543 goto out;
4544 old_rflow = rflow;
4545 rflow = &flow_table->flows[flow_id];
4546 rflow->filter = rc;
4547 if (old_rflow->filter == rflow->filter)
4548 old_rflow->filter = RPS_NO_FILTER;
4549 out:
4550 #endif
4551 rflow->last_qtail =
4552 per_cpu(softnet_data, next_cpu).input_queue_head;
4553 }
4554
4555 rflow->cpu = next_cpu;
4556 return rflow;
4557 }
4558
4559 /*
4560 * get_rps_cpu is called from netif_receive_skb and returns the target
4561 * CPU from the RPS map of the receiving queue for a given skb.
4562 * rcu_read_lock must be held on entry.
4563 */
4564 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4565 struct rps_dev_flow **rflowp)
4566 {
4567 const struct rps_sock_flow_table *sock_flow_table;
4568 struct netdev_rx_queue *rxqueue = dev->_rx;
4569 struct rps_dev_flow_table *flow_table;
4570 struct rps_map *map;
4571 int cpu = -1;
4572 u32 tcpu;
4573 u32 hash;
4574
4575 if (skb_rx_queue_recorded(skb)) {
4576 u16 index = skb_get_rx_queue(skb);
4577
4578 if (unlikely(index >= dev->real_num_rx_queues)) {
4579 WARN_ONCE(dev->real_num_rx_queues > 1,
4580 "%s received packet on queue %u, but number "
4581 "of RX queues is %u\n",
4582 dev->name, index, dev->real_num_rx_queues);
4583 goto done;
4584 }
4585 rxqueue += index;
4586 }
4587
4588 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4589
4590 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4591 map = rcu_dereference(rxqueue->rps_map);
4592 if (!flow_table && !map)
4593 goto done;
4594
4595 skb_reset_network_header(skb);
4596 hash = skb_get_hash(skb);
4597 if (!hash)
4598 goto done;
4599
4600 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4601 if (flow_table && sock_flow_table) {
4602 struct rps_dev_flow *rflow;
4603 u32 next_cpu;
4604 u32 ident;
4605
4606 /* First check into global flow table if there is a match.
4607 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4608 */
4609 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4610 if ((ident ^ hash) & ~rps_cpu_mask)
4611 goto try_rps;
4612
4613 next_cpu = ident & rps_cpu_mask;
4614
4615 /* OK, now we know there is a match,
4616 * we can look at the local (per receive queue) flow table
4617 */
4618 rflow = &flow_table->flows[hash & flow_table->mask];
4619 tcpu = rflow->cpu;
4620
4621 /*
4622 * If the desired CPU (where last recvmsg was done) is
4623 * different from current CPU (one in the rx-queue flow
4624 * table entry), switch if one of the following holds:
4625 * - Current CPU is unset (>= nr_cpu_ids).
4626 * - Current CPU is offline.
4627 * - The current CPU's queue tail has advanced beyond the
4628 * last packet that was enqueued using this table entry.
4629 * This guarantees that all previous packets for the flow
4630 * have been dequeued, thus preserving in order delivery.
4631 */
4632 if (unlikely(tcpu != next_cpu) &&
4633 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4634 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4635 rflow->last_qtail)) >= 0)) {
4636 tcpu = next_cpu;
4637 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4638 }
4639
4640 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4641 *rflowp = rflow;
4642 cpu = tcpu;
4643 goto done;
4644 }
4645 }
4646
4647 try_rps:
4648
4649 if (map) {
4650 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4651 if (cpu_online(tcpu)) {
4652 cpu = tcpu;
4653 goto done;
4654 }
4655 }
4656
4657 done:
4658 return cpu;
4659 }
4660
4661 #ifdef CONFIG_RFS_ACCEL
4662
4663 /**
4664 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4665 * @dev: Device on which the filter was set
4666 * @rxq_index: RX queue index
4667 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4668 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4669 *
4670 * Drivers that implement ndo_rx_flow_steer() should periodically call
4671 * this function for each installed filter and remove the filters for
4672 * which it returns %true.
4673 */
4674 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4675 u32 flow_id, u16 filter_id)
4676 {
4677 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4678 struct rps_dev_flow_table *flow_table;
4679 struct rps_dev_flow *rflow;
4680 bool expire = true;
4681 unsigned int cpu;
4682
4683 rcu_read_lock();
4684 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4685 if (flow_table && flow_id <= flow_table->mask) {
4686 rflow = &flow_table->flows[flow_id];
4687 cpu = READ_ONCE(rflow->cpu);
4688 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4689 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4690 rflow->last_qtail) <
4691 (int)(10 * flow_table->mask)))
4692 expire = false;
4693 }
4694 rcu_read_unlock();
4695 return expire;
4696 }
4697 EXPORT_SYMBOL(rps_may_expire_flow);
4698
4699 #endif /* CONFIG_RFS_ACCEL */
4700
4701 /* Called from hardirq (IPI) context */
4702 static void rps_trigger_softirq(void *data)
4703 {
4704 struct softnet_data *sd = data;
4705
4706 ____napi_schedule(sd, &sd->backlog);
4707 sd->received_rps++;
4708 }
4709
4710 #endif /* CONFIG_RPS */
4711
4712 /* Called from hardirq (IPI) context */
4713 static void trigger_rx_softirq(void *data)
4714 {
4715 struct softnet_data *sd = data;
4716
4717 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4718 smp_store_release(&sd->defer_ipi_scheduled, 0);
4719 }
4720
4721 /*
4722 * After we queued a packet into sd->input_pkt_queue,
4723 * we need to make sure this queue is serviced soon.
4724 *
4725 * - If this is another cpu queue, link it to our rps_ipi_list,
4726 * and make sure we will process rps_ipi_list from net_rx_action().
4727 *
4728 * - If this is our own queue, NAPI schedule our backlog.
4729 * Note that this also raises NET_RX_SOFTIRQ.
4730 */
4731 static void napi_schedule_rps(struct softnet_data *sd)
4732 {
4733 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4734
4735 #ifdef CONFIG_RPS
4736 if (sd != mysd) {
4737 sd->rps_ipi_next = mysd->rps_ipi_list;
4738 mysd->rps_ipi_list = sd;
4739
4740 /* If not called from net_rx_action() or napi_threaded_poll()
4741 * we have to raise NET_RX_SOFTIRQ.
4742 */
4743 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4744 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4745 return;
4746 }
4747 #endif /* CONFIG_RPS */
4748 __napi_schedule_irqoff(&mysd->backlog);
4749 }
4750
4751 #ifdef CONFIG_NET_FLOW_LIMIT
4752 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4753 #endif
4754
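/* Flow limit: once the backlog is more than half full, keep a short per-CPU
 * history of recently seen flow hashes; if a single flow fills more than half
 * of that history, its new packets are reported as droppable so that one
 * heavy flow cannot starve the other flows sharing the same backlog queue.
 */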
4755 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4756 {
4757 #ifdef CONFIG_NET_FLOW_LIMIT
4758 struct sd_flow_limit *fl;
4759 struct softnet_data *sd;
4760 unsigned int old_flow, new_flow;
4761
4762 if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
4763 return false;
4764
4765 sd = this_cpu_ptr(&softnet_data);
4766
4767 rcu_read_lock();
4768 fl = rcu_dereference(sd->flow_limit);
4769 if (fl) {
4770 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4771 old_flow = fl->history[fl->history_head];
4772 fl->history[fl->history_head] = new_flow;
4773
4774 fl->history_head++;
4775 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4776
4777 if (likely(fl->buckets[old_flow]))
4778 fl->buckets[old_flow]--;
4779
4780 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4781 fl->count++;
4782 rcu_read_unlock();
4783 return true;
4784 }
4785 }
4786 rcu_read_unlock();
4787 #endif
4788 return false;
4789 }
4790
4791 /*
4792 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4793 * queue (may be a remote CPU queue).
4794 */
4795 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4796 unsigned int *qtail)
4797 {
4798 enum skb_drop_reason reason;
4799 struct softnet_data *sd;
4800 unsigned long flags;
4801 unsigned int qlen;
4802
4803 reason = SKB_DROP_REASON_NOT_SPECIFIED;
4804 sd = &per_cpu(softnet_data, cpu);
4805
4806 rps_lock_irqsave(sd, &flags);
4807 if (!netif_running(skb->dev))
4808 goto drop;
4809 qlen = skb_queue_len(&sd->input_pkt_queue);
4810 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
4811 if (qlen) {
4812 enqueue:
4813 __skb_queue_tail(&sd->input_pkt_queue, skb);
4814 input_queue_tail_incr_save(sd, qtail);
4815 rps_unlock_irq_restore(sd, &flags);
4816 return NET_RX_SUCCESS;
4817 }
4818
4819 /* Schedule NAPI for backlog device
4820 * We can use a non-atomic operation since we own the queue lock
4821 */
4822 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4823 napi_schedule_rps(sd);
4824 goto enqueue;
4825 }
4826 reason = SKB_DROP_REASON_CPU_BACKLOG;
4827
4828 drop:
4829 sd->dropped++;
4830 rps_unlock_irq_restore(sd, &flags);
4831
4832 dev_core_stats_rx_dropped_inc(skb->dev);
4833 kfree_skb_reason(skb, reason);
4834 return NET_RX_DROP;
4835 }
4836
4837 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4838 {
4839 struct net_device *dev = skb->dev;
4840 struct netdev_rx_queue *rxqueue;
4841
4842 rxqueue = dev->_rx;
4843
4844 if (skb_rx_queue_recorded(skb)) {
4845 u16 index = skb_get_rx_queue(skb);
4846
4847 if (unlikely(index >= dev->real_num_rx_queues)) {
4848 WARN_ONCE(dev->real_num_rx_queues > 1,
4849 "%s received packet on queue %u, but number "
4850 "of RX queues is %u\n",
4851 dev->name, index, dev->real_num_rx_queues);
4852
4853 return rxqueue; /* Return first rxqueue */
4854 }
4855 rxqueue += index;
4856 }
4857 return rxqueue;
4858 }
4859
4860 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4861 struct bpf_prog *xdp_prog)
4862 {
4863 void *orig_data, *orig_data_end, *hard_start;
4864 struct netdev_rx_queue *rxqueue;
4865 bool orig_bcast, orig_host;
4866 u32 mac_len, frame_sz;
4867 __be16 orig_eth_type;
4868 struct ethhdr *eth;
4869 u32 metalen, act;
4870 int off;
4871
4872 /* The XDP program wants to see the packet starting at the MAC
4873 * header.
4874 */
4875 mac_len = skb->data - skb_mac_header(skb);
4876 hard_start = skb->data - skb_headroom(skb);
4877
4878 /* SKB "head" area always has tailroom for skb_shared_info */
4879 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4880 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4881
4882 rxqueue = netif_get_rxqueue(skb);
4883 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4884 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4885 skb_headlen(skb) + mac_len, true);
4886
4887 orig_data_end = xdp->data_end;
4888 orig_data = xdp->data;
4889 eth = (struct ethhdr *)xdp->data;
4890 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4891 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4892 orig_eth_type = eth->h_proto;
4893
4894 act = bpf_prog_run_xdp(xdp_prog, xdp);
4895
4896 /* check if bpf_xdp_adjust_head was used */
4897 off = xdp->data - orig_data;
4898 if (off) {
4899 if (off > 0)
4900 __skb_pull(skb, off);
4901 else if (off < 0)
4902 __skb_push(skb, -off);
4903
4904 skb->mac_header += off;
4905 skb_reset_network_header(skb);
4906 }
4907
4908 /* check if bpf_xdp_adjust_tail was used */
4909 off = xdp->data_end - orig_data_end;
4910 if (off != 0) {
4911 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4912 skb->len += off; /* positive on grow, negative on shrink */
4913 }
4914
4915 /* check if XDP changed eth hdr such that the skb needs an update */
4916 eth = (struct ethhdr *)xdp->data;
4917 if ((orig_eth_type != eth->h_proto) ||
4918 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4919 skb->dev->dev_addr)) ||
4920 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4921 __skb_push(skb, ETH_HLEN);
4922 skb->pkt_type = PACKET_HOST;
4923 skb->protocol = eth_type_trans(skb, skb->dev);
4924 }
4925
4926 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4927 * before calling us again on redirect path. We do not call do_redirect
4928 * as we leave that up to the caller.
4929 *
4930 * Caller is responsible for managing lifetime of skb (i.e. calling
4931 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4932 */
4933 switch (act) {
4934 case XDP_REDIRECT:
4935 case XDP_TX:
4936 __skb_push(skb, mac_len);
4937 break;
4938 case XDP_PASS:
4939 metalen = xdp->data - xdp->data_meta;
4940 if (metalen)
4941 skb_metadata_set(skb, metalen);
4942 break;
4943 }
4944
4945 return act;
4946 }
4947
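/* Generic XDP entry point for a single skb: enforce the linearity and
 * XDP_PACKET_HEADROOM guarantees that native XDP provides, run the program,
 * and warn on any verdict the generic path does not understand.
 */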
4948 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4949 struct xdp_buff *xdp,
4950 struct bpf_prog *xdp_prog)
4951 {
4952 u32 act = XDP_DROP;
4953
4954 /* Reinjected packets coming from act_mirred or similar should
4955 * not get XDP generic processing.
4956 */
4957 if (skb_is_redirected(skb))
4958 return XDP_PASS;
4959
4960 /* XDP packets must be linear and must have sufficient headroom
4961 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4962 * native XDP provides, thus we need to do it here as well.
4963 */
4964 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4965 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4966 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4967 int troom = skb->tail + skb->data_len - skb->end;
4968
4969 /* In case we have to go down the path and also linearize,
4970 * then lets do the pskb_expand_head() work just once here.
4971 */
4972 if (pskb_expand_head(skb,
4973 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4974 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4975 goto do_drop;
4976 if (skb_linearize(skb))
4977 goto do_drop;
4978 }
4979
4980 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4981 switch (act) {
4982 case XDP_REDIRECT:
4983 case XDP_TX:
4984 case XDP_PASS:
4985 break;
4986 default:
4987 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4988 fallthrough;
4989 case XDP_ABORTED:
4990 trace_xdp_exception(skb->dev, xdp_prog, act);
4991 fallthrough;
4992 case XDP_DROP:
4993 do_drop:
4994 kfree_skb(skb);
4995 break;
4996 }
4997
4998 return act;
4999 }
5000
5001 /* When doing generic XDP we have to bypass the qdisc layer and the
5002 * network taps in order to match in-driver-XDP behavior. This also means
5003 * that XDP packets are able to starve other packets going through a qdisc,
5004 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
5005 * queues, so they do not have this starvation issue.
5006 */
5007 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
5008 {
5009 struct net_device *dev = skb->dev;
5010 struct netdev_queue *txq;
5011 bool free_skb = true;
5012 int cpu, rc;
5013
5014 txq = netdev_core_pick_tx(dev, skb, NULL);
5015 cpu = smp_processor_id();
5016 HARD_TX_LOCK(dev, txq, cpu);
5017 if (!netif_xmit_frozen_or_drv_stopped(txq)) {
5018 rc = netdev_start_xmit(skb, dev, txq, 0);
5019 if (dev_xmit_complete(rc))
5020 free_skb = false;
5021 }
5022 HARD_TX_UNLOCK(dev, txq);
5023 if (free_skb) {
5024 trace_xdp_exception(dev, xdp_prog, XDP_TX);
5025 dev_core_stats_tx_dropped_inc(dev);
5026 kfree_skb(skb);
5027 }
5028 }
5029
5030 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5031
5032 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
5033 {
5034 if (xdp_prog) {
5035 struct xdp_buff xdp;
5036 u32 act;
5037 int err;
5038
5039 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
5040 if (act != XDP_PASS) {
5041 switch (act) {
5042 case XDP_REDIRECT:
5043 err = xdp_do_generic_redirect(skb->dev, skb,
5044 &xdp, xdp_prog);
5045 if (err)
5046 goto out_redir;
5047 break;
5048 case XDP_TX:
5049 generic_xdp_tx(skb, xdp_prog);
5050 break;
5051 }
5052 return XDP_DROP;
5053 }
5054 }
5055 return XDP_PASS;
5056 out_redir:
5057 kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
5058 return XDP_DROP;
5059 }
5060 EXPORT_SYMBOL_GPL(do_xdp_generic);
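/* Typical caller pattern (see __netif_receive_skb_core() below): run the
 * attached generic XDP program first and only continue normal receive
 * processing when it returns XDP_PASS; any other verdict means the skb has
 * already been redirected, transmitted or freed here.
 */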
5061
5062 static int netif_rx_internal(struct sk_buff *skb)
5063 {
5064 int ret;
5065
5066 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5067
5068 trace_netif_rx(skb);
5069
5070 #ifdef CONFIG_RPS
5071 if (static_branch_unlikely(&rps_needed)) {
5072 struct rps_dev_flow voidflow, *rflow = &voidflow;
5073 int cpu;
5074
5075 rcu_read_lock();
5076
5077 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5078 if (cpu < 0)
5079 cpu = smp_processor_id();
5080
5081 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5082
5083 rcu_read_unlock();
5084 } else
5085 #endif
5086 {
5087 unsigned int qtail;
5088
5089 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5090 }
5091 return ret;
5092 }
5093
5094 /**
5095 * __netif_rx - Slightly optimized version of netif_rx
5096 * @skb: buffer to post
5097 *
5098 * This behaves as netif_rx except that it does not disable bottom halves.
5099 * As a result this function may only be invoked from the interrupt context
5100 * (either hard or soft interrupt).
5101 */
5102 int __netif_rx(struct sk_buff *skb)
5103 {
5104 int ret;
5105
5106 lockdep_assert_once(hardirq_count() | softirq_count());
5107
5108 trace_netif_rx_entry(skb);
5109 ret = netif_rx_internal(skb);
5110 trace_netif_rx_exit(ret);
5111 return ret;
5112 }
5113 EXPORT_SYMBOL(__netif_rx);
5114
5115 /**
5116 * netif_rx - post buffer to the network code
5117 * @skb: buffer to post
5118 *
5119 * This function receives a packet from a device driver and queues it for
5120 * the upper (protocol) levels to process via the backlog NAPI device. It
5121 * always succeeds. The buffer may be dropped during processing for
5122 * congestion control or by the protocol layers.
5123 * The network buffer is passed via the backlog NAPI device. Modern NIC
5124 * drivers should use NAPI and GRO.
5125 * This function can be used from interrupt and from process context. The
5126 * caller from process context must not disable interrupts before invoking
5127 * this function.
5128 *
5129 * return values:
5130 * NET_RX_SUCCESS (no congestion)
5131 * NET_RX_DROP (packet was dropped)
5132 *
5133 */
5134 int netif_rx(struct sk_buff *skb)
5135 {
5136 bool need_bh_off = !(hardirq_count() | softirq_count());
5137 int ret;
5138
5139 if (need_bh_off)
5140 local_bh_disable();
5141 trace_netif_rx_entry(skb);
5142 ret = netif_rx_internal(skb);
5143 trace_netif_rx_exit(ret);
5144 if (need_bh_off)
5145 local_bh_enable();
5146 return ret;
5147 }
5148 EXPORT_SYMBOL(netif_rx);
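/* In short: __netif_rx() may only be called with bottom halves already off
 * (hard or soft interrupt context), while netif_rx() wraps the same path in
 * local_bh_disable()/local_bh_enable() so it is also safe from process
 * context.
 */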
5149
5150 static __latent_entropy void net_tx_action(struct softirq_action *h)
5151 {
5152 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5153
5154 if (sd->completion_queue) {
5155 struct sk_buff *clist;
5156
5157 local_irq_disable();
5158 clist = sd->completion_queue;
5159 sd->completion_queue = NULL;
5160 local_irq_enable();
5161
5162 while (clist) {
5163 struct sk_buff *skb = clist;
5164
5165 clist = clist->next;
5166
5167 WARN_ON(refcount_read(&skb->users));
5168 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5169 trace_consume_skb(skb, net_tx_action);
5170 else
5171 trace_kfree_skb(skb, net_tx_action,
5172 get_kfree_skb_cb(skb)->reason);
5173
5174 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5175 __kfree_skb(skb);
5176 else
5177 __napi_kfree_skb(skb,
5178 get_kfree_skb_cb(skb)->reason);
5179 }
5180 }
5181
5182 if (sd->output_queue) {
5183 struct Qdisc *head;
5184
5185 local_irq_disable();
5186 head = sd->output_queue;
5187 sd->output_queue = NULL;
5188 sd->output_queue_tailp = &sd->output_queue;
5189 local_irq_enable();
5190
5191 rcu_read_lock();
5192
5193 while (head) {
5194 struct Qdisc *q = head;
5195 spinlock_t *root_lock = NULL;
5196
5197 head = head->next_sched;
5198
5199 /* We need to make sure head->next_sched is read
5200 * before clearing __QDISC_STATE_SCHED
5201 */
5202 smp_mb__before_atomic();
5203
5204 if (!(q->flags & TCQ_F_NOLOCK)) {
5205 root_lock = qdisc_lock(q);
5206 spin_lock(root_lock);
5207 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5208 &q->state))) {
5209 /* There is a synchronize_net() between
5210 * STATE_DEACTIVATED flag being set and
5211 * qdisc_reset()/some_qdisc_is_busy() in
5212 * dev_deactivate(), so we can safely bail out
5213 * early here to avoid data race between
5214 * qdisc_deactivate() and some_qdisc_is_busy()
5215 * for lockless qdisc.
5216 */
5217 clear_bit(__QDISC_STATE_SCHED, &q->state);
5218 continue;
5219 }
5220
5221 clear_bit(__QDISC_STATE_SCHED, &q->state);
5222 qdisc_run(q);
5223 if (root_lock)
5224 spin_unlock(root_lock);
5225 }
5226
5227 rcu_read_unlock();
5228 }
5229
5230 xfrm_dev_backlog(sd);
5231 }
5232
5233 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5234 /* This hook is defined here for ATM LANE */
5235 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5236 unsigned char *addr) __read_mostly;
5237 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5238 #endif
5239
5240 /**
5241 * netdev_is_rx_handler_busy - check if receive handler is registered
5242 * @dev: device to check
5243 *
5244 * Check if a receive handler is already registered for a given device.
5245 * Return true if there is one.
5246 *
5247 * The caller must hold the rtnl_mutex.
5248 */
5249 bool netdev_is_rx_handler_busy(struct net_device *dev)
5250 {
5251 ASSERT_RTNL();
5252 return dev && rtnl_dereference(dev->rx_handler);
5253 }
5254 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5255
5256 /**
5257 * netdev_rx_handler_register - register receive handler
5258 * @dev: device to register a handler for
5259 * @rx_handler: receive handler to register
5260 * @rx_handler_data: data pointer that is used by rx handler
5261 *
5262 * Register a receive handler for a device. This handler will then be
5263 * called from __netif_receive_skb. A negative errno code is returned
5264 * on a failure.
5265 *
5266 * The caller must hold the rtnl_mutex.
5267 *
5268 * For a general description of rx_handler, see enum rx_handler_result.
5269 */
5270 int netdev_rx_handler_register(struct net_device *dev,
5271 rx_handler_func_t *rx_handler,
5272 void *rx_handler_data)
5273 {
5274 if (netdev_is_rx_handler_busy(dev))
5275 return -EBUSY;
5276
5277 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5278 return -EINVAL;
5279
5280 /* Note: rx_handler_data must be set before rx_handler */
5281 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5282 rcu_assign_pointer(dev->rx_handler, rx_handler);
5283
5284 return 0;
5285 }
5286 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5287
5288 /**
5289 * netdev_rx_handler_unregister - unregister receive handler
5290 * @dev: device to unregister a handler from
5291 *
5292 * Unregister a receive handler from a device.
5293 *
5294 * The caller must hold the rtnl_mutex.
5295 */
5296 void netdev_rx_handler_unregister(struct net_device *dev)
5297 {
5298
5299 ASSERT_RTNL();
5300 RCU_INIT_POINTER(dev->rx_handler, NULL);
5301 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5302 * section has a guarantee to see a non NULL rx_handler_data
5303 * as well.
5304 */
5305 synchronize_net();
5306 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5307 }
5308 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
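/* A minimal usage sketch (hypothetical names), mirroring what bridge- or
 * bonding-style users do under rtnl_lock:
 *
 *	if (!netdev_is_rx_handler_busy(port_dev))
 *		err = netdev_rx_handler_register(port_dev, my_rx_handler, my_priv);
 *	...
 *	netdev_rx_handler_unregister(port_dev);
 *
 * where my_rx_handler() returns one of the rx_handler_result codes consumed
 * by __netif_receive_skb_core() below.
 */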
5309
5310 /*
5311 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5312 * the special handling of PFMEMALLOC skbs.
5313 */
5314 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5315 {
5316 switch (skb->protocol) {
5317 case htons(ETH_P_ARP):
5318 case htons(ETH_P_IP):
5319 case htons(ETH_P_IPV6):
5320 case htons(ETH_P_8021Q):
5321 case htons(ETH_P_8021AD):
5322 return true;
5323 default:
5324 return false;
5325 }
5326 }
5327
5328 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5329 int *ret, struct net_device *orig_dev)
5330 {
5331 if (nf_hook_ingress_active(skb)) {
5332 int ingress_retval;
5333
5334 if (*pt_prev) {
5335 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5336 *pt_prev = NULL;
5337 }
5338
5339 rcu_read_lock();
5340 ingress_retval = nf_hook_ingress(skb);
5341 rcu_read_unlock();
5342 return ingress_retval;
5343 }
5344 return 0;
5345 }
5346
5347 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5348 struct packet_type **ppt_prev)
5349 {
5350 struct packet_type *ptype, *pt_prev;
5351 rx_handler_func_t *rx_handler;
5352 struct sk_buff *skb = *pskb;
5353 struct net_device *orig_dev;
5354 bool deliver_exact = false;
5355 int ret = NET_RX_DROP;
5356 __be16 type;
5357
5358 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
5359
5360 trace_netif_receive_skb(skb);
5361
5362 orig_dev = skb->dev;
5363
5364 skb_reset_network_header(skb);
5365 if (!skb_transport_header_was_set(skb))
5366 skb_reset_transport_header(skb);
5367 skb_reset_mac_len(skb);
5368
5369 pt_prev = NULL;
5370
5371 another_round:
5372 skb->skb_iif = skb->dev->ifindex;
5373
5374 __this_cpu_inc(softnet_data.processed);
5375
5376 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5377 int ret2;
5378
5379 migrate_disable();
5380 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5381 migrate_enable();
5382
5383 if (ret2 != XDP_PASS) {
5384 ret = NET_RX_DROP;
5385 goto out;
5386 }
5387 }
5388
5389 if (eth_type_vlan(skb->protocol)) {
5390 skb = skb_vlan_untag(skb);
5391 if (unlikely(!skb))
5392 goto out;
5393 }
5394
5395 if (skb_skip_tc_classify(skb))
5396 goto skip_classify;
5397
5398 if (pfmemalloc)
5399 goto skip_taps;
5400
5401 list_for_each_entry_rcu(ptype, &ptype_all, list) {
5402 if (pt_prev)
5403 ret = deliver_skb(skb, pt_prev, orig_dev);
5404 pt_prev = ptype;
5405 }
5406
5407 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5408 if (pt_prev)
5409 ret = deliver_skb(skb, pt_prev, orig_dev);
5410 pt_prev = ptype;
5411 }
5412
5413 skip_taps:
5414 #ifdef CONFIG_NET_INGRESS
5415 if (static_branch_unlikely(&ingress_needed_key)) {
5416 bool another = false;
5417
5418 nf_skip_egress(skb, true);
5419 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5420 &another);
5421 if (another)
5422 goto another_round;
5423 if (!skb)
5424 goto out;
5425
5426 nf_skip_egress(skb, false);
5427 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5428 goto out;
5429 }
5430 #endif
5431 skb_reset_redirect(skb);
5432 skip_classify:
5433 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5434 goto drop;
5435
5436 if (skb_vlan_tag_present(skb)) {
5437 if (pt_prev) {
5438 ret = deliver_skb(skb, pt_prev, orig_dev);
5439 pt_prev = NULL;
5440 }
5441 if (vlan_do_receive(&skb))
5442 goto another_round;
5443 else if (unlikely(!skb))
5444 goto out;
5445 }
5446
5447 rx_handler = rcu_dereference(skb->dev->rx_handler);
5448 if (rx_handler) {
5449 if (pt_prev) {
5450 ret = deliver_skb(skb, pt_prev, orig_dev);
5451 pt_prev = NULL;
5452 }
5453 switch (rx_handler(&skb)) {
5454 case RX_HANDLER_CONSUMED:
5455 ret = NET_RX_SUCCESS;
5456 goto out;
5457 case RX_HANDLER_ANOTHER:
5458 goto another_round;
5459 case RX_HANDLER_EXACT:
5460 deliver_exact = true;
5461 break;
5462 case RX_HANDLER_PASS:
5463 break;
5464 default:
5465 BUG();
5466 }
5467 }
5468
5469 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5470 check_vlan_id:
5471 if (skb_vlan_tag_get_id(skb)) {
5472 /* The vlan id is non-zero and vlan_do_receive() above couldn't
5473 * find the vlan device.
5474 */
5475 skb->pkt_type = PACKET_OTHERHOST;
5476 } else if (eth_type_vlan(skb->protocol)) {
5477 /* Outer header is 802.1P with vlan 0, inner header is
5478 * 802.1Q or 802.1AD and vlan_do_receive() above could
5479 * not find vlan dev for vlan id 0.
5480 */
5481 __vlan_hwaccel_clear_tag(skb);
5482 skb = skb_vlan_untag(skb);
5483 if (unlikely(!skb))
5484 goto out;
5485 if (vlan_do_receive(&skb))
5486 /* After stripping off 802.1P header with vlan 0
5487 * vlan dev is found for inner header.
5488 */
5489 goto another_round;
5490 else if (unlikely(!skb))
5491 goto out;
5492 else
5493 /* We have stripped outer 802.1P vlan 0 header.
5494 * But could not find vlan dev.
5495 * check again for vlan id to set OTHERHOST.
5496 */
5497 goto check_vlan_id;
5498 }
5499 /* Note: we might in the future use prio bits
5500 * and set skb->priority like in vlan_do_receive().
5501 * For the time being, just ignore the Priority Code Point.
5502 */
5503 __vlan_hwaccel_clear_tag(skb);
5504 }
5505
5506 type = skb->protocol;
5507
5508 /* deliver only exact match when indicated */
5509 if (likely(!deliver_exact)) {
5510 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5511 &ptype_base[ntohs(type) &
5512 PTYPE_HASH_MASK]);
5513 }
5514
5515 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5516 &orig_dev->ptype_specific);
5517
5518 if (unlikely(skb->dev != orig_dev)) {
5519 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5520 &skb->dev->ptype_specific);
5521 }
5522
5523 if (pt_prev) {
5524 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5525 goto drop;
5526 *ppt_prev = pt_prev;
5527 } else {
5528 drop:
5529 if (!deliver_exact)
5530 dev_core_stats_rx_dropped_inc(skb->dev);
5531 else
5532 dev_core_stats_rx_nohandler_inc(skb->dev);
5533 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5534 /* Jamal, now you will not be able to escape explaining
5535 * to me how you were going to use this. :-)
5536 */
5537 ret = NET_RX_DROP;
5538 }
5539
5540 out:
5541 /* The invariant here is that if *ppt_prev is not NULL
5542 * then skb should also be non-NULL.
5543 *
5544 * Apparently *ppt_prev assignment above holds this invariant due to
5545 * skb dereferencing near it.
5546 */
5547 *pskb = skb;
5548 return ret;
5549 }
5550
5551 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5552 {
5553 struct net_device *orig_dev = skb->dev;
5554 struct packet_type *pt_prev = NULL;
5555 int ret;
5556
5557 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5558 if (pt_prev)
5559 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5560 skb->dev, pt_prev, orig_dev);
5561 return ret;
5562 }
5563
5564 /**
5565 * netif_receive_skb_core - special purpose version of netif_receive_skb
5566 * @skb: buffer to process
5567 *
5568 * More direct receive version of netif_receive_skb(). It should
5569 * only be used by callers that have a need to skip RPS and Generic XDP.
5570 * Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5571 *
5572 * This function may only be called from softirq context and interrupts
5573 * should be enabled.
5574 *
5575 * Return values (usually ignored):
5576 * NET_RX_SUCCESS: no congestion
5577 * NET_RX_DROP: packet was dropped
5578 */
5579 int netif_receive_skb_core(struct sk_buff *skb)
5580 {
5581 int ret;
5582
5583 rcu_read_lock();
5584 ret = __netif_receive_skb_one_core(skb, false);
5585 rcu_read_unlock();
5586
5587 return ret;
5588 }
5589 EXPORT_SYMBOL(netif_receive_skb_core);
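/* Example (illustrative, not part of this file): a driver that already ran
 * its own XDP program and explicitly does not want RPS could feed the stack
 * directly from its NAPI poll loop. example_rx_one() is hypothetical.
 */
static void example_rx_one(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	netif_receive_skb_core(skb);	/* softirq context; RPS and generic XDP are skipped */
}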
5590
5591 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5592 struct packet_type *pt_prev,
5593 struct net_device *orig_dev)
5594 {
5595 struct sk_buff *skb, *next;
5596
5597 if (!pt_prev)
5598 return;
5599 if (list_empty(head))
5600 return;
5601 if (pt_prev->list_func != NULL)
5602 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5603 ip_list_rcv, head, pt_prev, orig_dev);
5604 else
5605 list_for_each_entry_safe(skb, next, head, list) {
5606 skb_list_del_init(skb);
5607 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5608 }
5609 }
5610
5611 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5612 {
5613 /* Fast-path assumptions:
5614 * - There is no RX handler.
5615 * - Only one packet_type matches.
5616 * If either of these fails, we will end up doing some per-packet
5617 * processing in-line, then handling the 'last ptype' for the whole
5618 * sublist. This can't cause out-of-order delivery to any single ptype,
5619 * because the 'last ptype' must be constant across the sublist, and all
5620 * other ptypes are handled per-packet.
5621 */
5622 /* Current (common) ptype of sublist */
5623 struct packet_type *pt_curr = NULL;
5624 /* Current (common) orig_dev of sublist */
5625 struct net_device *od_curr = NULL;
5626 struct list_head sublist;
5627 struct sk_buff *skb, *next;
5628
5629 INIT_LIST_HEAD(&sublist);
5630 list_for_each_entry_safe(skb, next, head, list) {
5631 struct net_device *orig_dev = skb->dev;
5632 struct packet_type *pt_prev = NULL;
5633
5634 skb_list_del_init(skb);
5635 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5636 if (!pt_prev)
5637 continue;
5638 if (pt_curr != pt_prev || od_curr != orig_dev) {
5639 /* dispatch old sublist */
5640 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5641 /* start new sublist */
5642 INIT_LIST_HEAD(&sublist);
5643 pt_curr = pt_prev;
5644 od_curr = orig_dev;
5645 }
5646 list_add_tail(&skb->list, &sublist);
5647 }
5648
5649 /* dispatch final sublist */
5650 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5651 }
5652
5653 static int __netif_receive_skb(struct sk_buff *skb)
5654 {
5655 int ret;
5656
5657 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5658 unsigned int noreclaim_flag;
5659
5660 /*
5661 * PFMEMALLOC skbs are special, they should
5662 * - be delivered to SOCK_MEMALLOC sockets only
5663 * - stay away from userspace
5664 * - have bounded memory usage
5665 *
5666 * Use PF_MEMALLOC as this saves us from propagating the allocation
5667 * context down to all allocation sites.
5668 */
5669 noreclaim_flag = memalloc_noreclaim_save();
5670 ret = __netif_receive_skb_one_core(skb, true);
5671 memalloc_noreclaim_restore(noreclaim_flag);
5672 } else
5673 ret = __netif_receive_skb_one_core(skb, false);
5674
5675 return ret;
5676 }
5677
5678 static void __netif_receive_skb_list(struct list_head *head)
5679 {
5680 unsigned long noreclaim_flag = 0;
5681 struct sk_buff *skb, *next;
5682 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5683
5684 list_for_each_entry_safe(skb, next, head, list) {
5685 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5686 struct list_head sublist;
5687
5688 /* Handle the previous sublist */
5689 list_cut_before(&sublist, head, &skb->list);
5690 if (!list_empty(&sublist))
5691 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5692 pfmemalloc = !pfmemalloc;
5693 /* See comments in __netif_receive_skb */
5694 if (pfmemalloc)
5695 noreclaim_flag = memalloc_noreclaim_save();
5696 else
5697 memalloc_noreclaim_restore(noreclaim_flag);
5698 }
5699 }
5700 /* Handle the remaining sublist */
5701 if (!list_empty(head))
5702 __netif_receive_skb_list_core(head, pfmemalloc);
5703 /* Restore pflags */
5704 if (pfmemalloc)
5705 memalloc_noreclaim_restore(noreclaim_flag);
5706 }
5707
5708 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5709 {
5710 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5711 struct bpf_prog *new = xdp->prog;
5712 int ret = 0;
5713
5714 switch (xdp->command) {
5715 case XDP_SETUP_PROG:
5716 rcu_assign_pointer(dev->xdp_prog, new);
5717 if (old)
5718 bpf_prog_put(old);
5719
5720 if (old && !new) {
5721 static_branch_dec(&generic_xdp_needed_key);
5722 } else if (new && !old) {
5723 static_branch_inc(&generic_xdp_needed_key);
5724 dev_disable_lro(dev);
5725 dev_disable_gro_hw(dev);
5726 }
5727 break;
5728
5729 default:
5730 ret = -EINVAL;
5731 break;
5732 }
5733
5734 return ret;
5735 }
5736
5737 static int netif_receive_skb_internal(struct sk_buff *skb)
5738 {
5739 int ret;
5740
5741 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5742
5743 if (skb_defer_rx_timestamp(skb))
5744 return NET_RX_SUCCESS;
5745
5746 rcu_read_lock();
5747 #ifdef CONFIG_RPS
5748 if (static_branch_unlikely(&rps_needed)) {
5749 struct rps_dev_flow voidflow, *rflow = &voidflow;
5750 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5751
5752 if (cpu >= 0) {
5753 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5754 rcu_read_unlock();
5755 return ret;
5756 }
5757 }
5758 #endif
5759 ret = __netif_receive_skb(skb);
5760 rcu_read_unlock();
5761 return ret;
5762 }
5763
5764 void netif_receive_skb_list_internal(struct list_head *head)
5765 {
5766 struct sk_buff *skb, *next;
5767 struct list_head sublist;
5768
5769 INIT_LIST_HEAD(&sublist);
5770 list_for_each_entry_safe(skb, next, head, list) {
5771 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5772 skb_list_del_init(skb);
5773 if (!skb_defer_rx_timestamp(skb))
5774 list_add_tail(&skb->list, &sublist);
5775 }
5776 list_splice_init(&sublist, head);
5777
5778 rcu_read_lock();
5779 #ifdef CONFIG_RPS
5780 if (static_branch_unlikely(&rps_needed)) {
5781 list_for_each_entry_safe(skb, next, head, list) {
5782 struct rps_dev_flow voidflow, *rflow = &voidflow;
5783 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5784
5785 if (cpu >= 0) {
5786 /* Will be handled, remove from list */
5787 skb_list_del_init(skb);
5788 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5789 }
5790 }
5791 }
5792 #endif
5793 __netif_receive_skb_list(head);
5794 rcu_read_unlock();
5795 }
5796
5797 /**
5798 * netif_receive_skb - process receive buffer from network
5799 * @skb: buffer to process
5800 *
5801 * netif_receive_skb() is the main receive data processing function.
5802 * It always succeeds. The buffer may be dropped during processing
5803 * for congestion control or by the protocol layers.
5804 *
5805 * This function may only be called from softirq context and interrupts
5806 * should be enabled.
5807 *
5808 * Return values (usually ignored):
5809 * NET_RX_SUCCESS: no congestion
5810 * NET_RX_DROP: packet was dropped
5811 */
5812 int netif_receive_skb(struct sk_buff *skb)
5813 {
5814 int ret;
5815
5816 trace_netif_receive_skb_entry(skb);
5817
5818 ret = netif_receive_skb_internal(skb);
5819 trace_netif_receive_skb_exit(ret);
5820
5821 return ret;
5822 }
5823 EXPORT_SYMBOL(netif_receive_skb);
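/* Example (illustrative, not part of this file): the common driver receive
 * path. A NAPI driver sets the protocol and hands each completed buffer to
 * netif_receive_skb() (or napi_gro_receive() when GRO is wanted). The
 * example_ring structure and example_build_skb() helper are hypothetical.
 */
struct example_ring {
	struct net_device *netdev;
	/* ... descriptor ring state ... */
};

static struct sk_buff *example_build_skb(struct example_ring *ring);

static int example_clean_rx_ring(struct example_ring *ring, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_build_skb(ring))) {
		skb->protocol = eth_type_trans(skb, ring->netdev);
		netif_receive_skb(skb);
		done++;
	}
	return done;
}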
5824
5825 /**
5826 * netif_receive_skb_list - process many receive buffers from network
5827 * @head: list of skbs to process.
5828 *
5829 * Since return value of netif_receive_skb() is normally ignored, and
5830 * wouldn't be meaningful for a list, this function returns void.
5831 *
5832 * This function may only be called from softirq context and interrupts
5833 * should be enabled.
5834 */
5835 void netif_receive_skb_list(struct list_head *head)
5836 {
5837 struct sk_buff *skb;
5838
5839 if (list_empty(head))
5840 return;
5841 if (trace_netif_receive_skb_list_entry_enabled()) {
5842 list_for_each_entry(skb, head, list)
5843 trace_netif_receive_skb_list_entry(skb);
5844 }
5845 netif_receive_skb_list_internal(head);
5846 trace_netif_receive_skb_list_exit(0);
5847 }
5848 EXPORT_SYMBOL(netif_receive_skb_list);
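/* Example (illustrative, not part of this file): batching completed buffers
 * and delivering them in one call at the end of a poll cycle, which lets the
 * listified receive path amortise per-packet costs. The example_* names are
 * hypothetical.
 */
static void example_deliver_batch(struct sk_buff **skbs, int n,
				  struct net_device *dev)
{
	LIST_HEAD(rx_list);
	int i;

	for (i = 0; i < n; i++) {
		skbs[i]->protocol = eth_type_trans(skbs[i], dev);
		list_add_tail(&skbs[i]->list, &rx_list);
	}
	netif_receive_skb_list(&rx_list);	/* a no-op for an empty list */
}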
5849
5850 static DEFINE_PER_CPU(struct work_struct, flush_works);
5851
5852 /* Network device is going away, flush any packets still pending */
5853 static void flush_backlog(struct work_struct *work)
5854 {
5855 struct sk_buff *skb, *tmp;
5856 struct softnet_data *sd;
5857
5858 local_bh_disable();
5859 sd = this_cpu_ptr(&softnet_data);
5860
5861 rps_lock_irq_disable(sd);
5862 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5863 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5864 __skb_unlink(skb, &sd->input_pkt_queue);
5865 dev_kfree_skb_irq(skb);
5866 input_queue_head_incr(sd);
5867 }
5868 }
5869 rps_unlock_irq_enable(sd);
5870
5871 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5872 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5873 __skb_unlink(skb, &sd->process_queue);
5874 kfree_skb(skb);
5875 input_queue_head_incr(sd);
5876 }
5877 }
5878 local_bh_enable();
5879 }
5880
5881 static bool flush_required(int cpu)
5882 {
5883 #if IS_ENABLED(CONFIG_RPS)
5884 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5885 bool do_flush;
5886
5887 rps_lock_irq_disable(sd);
5888
5889 /* as insertion into process_queue happens with the rps lock held,
5890 * process_queue access may race only with dequeue
5891 */
5892 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5893 !skb_queue_empty_lockless(&sd->process_queue);
5894 rps_unlock_irq_enable(sd);
5895
5896 return do_flush;
5897 #endif
5898 /* without RPS we can't safely check input_pkt_queue: during a
5899 * concurrent remote skb_queue_splice() we can detect as empty both
5900 * input_pkt_queue and process_queue even if the latter could end up
5901 * containing a lot of packets.
5902 */
5903 return true;
5904 }
5905
5906 static void flush_all_backlogs(void)
5907 {
5908 static cpumask_t flush_cpus;
5909 unsigned int cpu;
5910
5911 /* Since we are under rtnl lock protection we can use static data
5912 * for the cpumask and avoid allocating the possibly large mask
5913 * on the stack.
5914 */
5915 ASSERT_RTNL();
5916
5917 cpus_read_lock();
5918
5919 cpumask_clear(&flush_cpus);
5920 for_each_online_cpu(cpu) {
5921 if (flush_required(cpu)) {
5922 queue_work_on(cpu, system_highpri_wq,
5923 per_cpu_ptr(&flush_works, cpu));
5924 cpumask_set_cpu(cpu, &flush_cpus);
5925 }
5926 }
5927
5928 /* We can have in-flight packet[s] on the cpus we are not flushing;
5929 * synchronize_net() in unregister_netdevice_many() will take care of
5930 * them
5931 */
5932 for_each_cpu(cpu, &flush_cpus)
5933 flush_work(per_cpu_ptr(&flush_works, cpu));
5934
5935 cpus_read_unlock();
5936 }
5937
5938 static void net_rps_send_ipi(struct softnet_data *remsd)
5939 {
5940 #ifdef CONFIG_RPS
5941 while (remsd) {
5942 struct softnet_data *next = remsd->rps_ipi_next;
5943
5944 if (cpu_online(remsd->cpu))
5945 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5946 remsd = next;
5947 }
5948 #endif
5949 }
5950
5951 /*
5952 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5953 * Note: called with local irq disabled, but exits with local irq enabled.
5954 */
5955 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5956 {
5957 #ifdef CONFIG_RPS
5958 struct softnet_data *remsd = sd->rps_ipi_list;
5959
5960 if (remsd) {
5961 sd->rps_ipi_list = NULL;
5962
5963 local_irq_enable();
5964
5965 /* Send pending IPI's to kick RPS processing on remote cpus. */
5966 net_rps_send_ipi(remsd);
5967 } else
5968 #endif
5969 local_irq_enable();
5970 }
5971
5972 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5973 {
5974 #ifdef CONFIG_RPS
5975 return sd->rps_ipi_list != NULL;
5976 #else
5977 return false;
5978 #endif
5979 }
5980
5981 static int process_backlog(struct napi_struct *napi, int quota)
5982 {
5983 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5984 bool again = true;
5985 int work = 0;
5986
5987 /* Check if we have pending IPIs; it's better to send them now
5988 * than to wait for net_rx_action() to end.
5989 */
5990 if (sd_has_rps_ipi_waiting(sd)) {
5991 local_irq_disable();
5992 net_rps_action_and_irq_enable(sd);
5993 }
5994
5995 napi->weight = READ_ONCE(dev_rx_weight);
5996 while (again) {
5997 struct sk_buff *skb;
5998
5999 while ((skb = __skb_dequeue(&sd->process_queue))) {
6000 rcu_read_lock();
6001 __netif_receive_skb(skb);
6002 rcu_read_unlock();
6003 input_queue_head_incr(sd);
6004 if (++work >= quota)
6005 return work;
6006
6007 }
6008
6009 rps_lock_irq_disable(sd);
6010 if (skb_queue_empty(&sd->input_pkt_queue)) {
6011 /*
6012 * Inline a custom version of __napi_complete().
6013 * Only the current cpu owns and manipulates this napi,
6014 * and NAPI_STATE_SCHED is the only possible flag set
6015 * on backlog.
6016 * We can use a plain write instead of clear_bit(),
6017 * and we don't need an smp_mb() memory barrier.
6018 */
6019 napi->state = 0;
6020 again = false;
6021 } else {
6022 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6023 &sd->process_queue);
6024 }
6025 rps_unlock_irq_enable(sd);
6026 }
6027
6028 return work;
6029 }
6030
6031 /**
6032 * __napi_schedule - schedule for receive
6033 * @n: entry to schedule
6034 *
6035 * The entry's receive function will be scheduled to run.
6036 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6037 */
6038 void __napi_schedule(struct napi_struct *n)
6039 {
6040 unsigned long flags;
6041
6042 local_irq_save(flags);
6043 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6044 local_irq_restore(flags);
6045 }
6046 EXPORT_SYMBOL(__napi_schedule);
6047
6048 /**
6049 * napi_schedule_prep - check if napi can be scheduled
6050 * @n: napi context
6051 *
6052 * Test if NAPI routine is already running, and if not mark
6053 * it as running. This is used as a condition variable to
6054 * ensure only one NAPI poll instance runs. We also make
6055 * sure there is no pending NAPI disable.
6056 */
6057 bool napi_schedule_prep(struct napi_struct *n)
6058 {
6059 unsigned long new, val = READ_ONCE(n->state);
6060
6061 do {
6062 if (unlikely(val & NAPIF_STATE_DISABLE))
6063 return false;
6064 new = val | NAPIF_STATE_SCHED;
6065
6066 /* Sets STATE_MISSED bit if STATE_SCHED was already set.
6067 * This was suggested by Alexander Duyck, as the compiler
6068 * emits better code than:
6069 * if (val & NAPIF_STATE_SCHED)
6070 * new |= NAPIF_STATE_MISSED;
6071 */
6072 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6073 NAPIF_STATE_MISSED;
6074 } while (!try_cmpxchg(&n->state, &val, new));
6075
6076 return !(val & NAPIF_STATE_SCHED);
6077 }
6078 EXPORT_SYMBOL(napi_schedule_prep);
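/* Example (illustrative, not part of this file): a hard interrupt handler
 * using the prep/schedule pair explicitly so device interrupts are masked
 * only when a poll was actually scheduled. example_nic and
 * example_nic_mask_irqs() are hypothetical; most drivers simply call
 * napi_schedule(), which wraps this pattern.
 */
struct example_nic {
	struct napi_struct napi;
	/* ... device registers, rings ... */
};

static void example_nic_mask_irqs(struct example_nic *nic);

static irqreturn_t example_nic_isr(int irq, void *data)
{
	struct example_nic *nic = data;

	if (napi_schedule_prep(&nic->napi)) {
		example_nic_mask_irqs(nic);	/* stop further RX interrupts */
		__napi_schedule(&nic->napi);
	}
	return IRQ_HANDLED;
}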
6079
6080 /**
6081 * __napi_schedule_irqoff - schedule for receive
6082 * @n: entry to schedule
6083 *
6084 * Variant of __napi_schedule() assuming hard irqs are masked.
6085 *
6086 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6087 * because the interrupt disabled assumption might not be true
6088 * due to force-threaded interrupts and spinlock substitution.
6089 */
6090 void __napi_schedule_irqoff(struct napi_struct *n)
6091 {
6092 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6093 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6094 else
6095 __napi_schedule(n);
6096 }
6097 EXPORT_SYMBOL(__napi_schedule_irqoff);
6098
6099 bool napi_complete_done(struct napi_struct *n, int work_done)
6100 {
6101 unsigned long flags, val, new, timeout = 0;
6102 bool ret = true;
6103
6104 /*
6105 * 1) Don't let napi dequeue from the cpu poll list
6106 * just in case it's running on a different cpu.
6107 * 2) If we are busy polling, do nothing here, we have
6108 * the guarantee we will be called later.
6109 */
6110 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6111 NAPIF_STATE_IN_BUSY_POLL)))
6112 return false;
6113
6114 if (work_done) {
6115 if (n->gro_bitmask)
6116 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6117 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6118 }
6119 if (n->defer_hard_irqs_count > 0) {
6120 n->defer_hard_irqs_count--;
6121 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6122 if (timeout)
6123 ret = false;
6124 }
6125 if (n->gro_bitmask) {
6126 /* When the NAPI instance uses a timeout and keeps postponing
6127 * it, we need to bound somehow the time packets are kept in
6128 * the GRO layer
6129 */
6130 napi_gro_flush(n, !!timeout);
6131 }
6132
6133 gro_normal_list(n);
6134
6135 if (unlikely(!list_empty(&n->poll_list))) {
6136 /* If n->poll_list is not empty, we need to mask irqs */
6137 local_irq_save(flags);
6138 list_del_init(&n->poll_list);
6139 local_irq_restore(flags);
6140 }
6141 WRITE_ONCE(n->list_owner, -1);
6142
6143 val = READ_ONCE(n->state);
6144 do {
6145 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6146
6147 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6148 NAPIF_STATE_SCHED_THREADED |
6149 NAPIF_STATE_PREFER_BUSY_POLL);
6150
6151 /* If STATE_MISSED was set, leave STATE_SCHED set,
6152 * because we will call napi->poll() one more time.
6153 * This C code was suggested by Alexander Duyck to help gcc.
6154 */
6155 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6156 NAPIF_STATE_SCHED;
6157 } while (!try_cmpxchg(&n->state, &val, new));
6158
6159 if (unlikely(val & NAPIF_STATE_MISSED)) {
6160 __napi_schedule(n);
6161 return false;
6162 }
6163
6164 if (timeout)
6165 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6166 HRTIMER_MODE_REL_PINNED);
6167 return ret;
6168 }
6169 EXPORT_SYMBOL(napi_complete_done);
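/* Example (illustrative, not part of this file): the canonical driver NAPI
 * poll routine built around napi_complete_done(). example_dev_clean_rx()
 * and example_dev_unmask_irqs() are hypothetical helpers.
 */
struct example_dev {
	struct napi_struct napi;
};

static int example_dev_clean_rx(struct example_dev *edev, int budget);
static void example_dev_unmask_irqs(struct example_dev *edev);

static int example_dev_poll(struct napi_struct *napi, int budget)
{
	struct example_dev *edev = container_of(napi, struct example_dev, napi);
	int work_done = example_dev_clean_rx(edev, budget);

	/* only re-arm device interrupts if NAPI really completed: the return
	 * value is false when gro_flush_timeout/napi_defer_hard_irqs keep it
	 * scheduled.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_dev_unmask_irqs(edev);

	return work_done;
}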
6170
6171 /* must be called under rcu_read_lock(), as we dont take a reference */
6172 static struct napi_struct *napi_by_id(unsigned int napi_id)
6173 {
6174 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6175 struct napi_struct *napi;
6176
6177 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6178 if (napi->napi_id == napi_id)
6179 return napi;
6180
6181 return NULL;
6182 }
6183
6184 #if defined(CONFIG_NET_RX_BUSY_POLL)
6185
6186 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6187 {
6188 if (!skip_schedule) {
6189 gro_normal_list(napi);
6190 __napi_schedule(napi);
6191 return;
6192 }
6193
6194 if (napi->gro_bitmask) {
6195 /* flush too old packets
6196 * If HZ < 1000, flush all packets.
6197 */
6198 napi_gro_flush(napi, HZ >= 1000);
6199 }
6200
6201 gro_normal_list(napi);
6202 clear_bit(NAPI_STATE_SCHED, &napi->state);
6203 }
6204
6205 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6206 u16 budget)
6207 {
6208 bool skip_schedule = false;
6209 unsigned long timeout;
6210 int rc;
6211
6212 /* Busy polling means there is a high chance device driver hard irq
6213 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6214 * set in napi_schedule_prep().
6215 * Since we are about to call napi->poll() once more, we can safely
6216 * clear NAPI_STATE_MISSED.
6217 *
6218 * Note: x86 could use a single "lock and ..." instruction
6219 * to perform these two clear_bit()
6220 */
6221 clear_bit(NAPI_STATE_MISSED, &napi->state);
6222 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6223
6224 local_bh_disable();
6225
6226 if (prefer_busy_poll) {
6227 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6228 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6229 if (napi->defer_hard_irqs_count && timeout) {
6230 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6231 skip_schedule = true;
6232 }
6233 }
6234
6235 /* All we really want here is to re-enable device interrupts.
6236 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6237 */
6238 rc = napi->poll(napi, budget);
6239 /* We can't gro_normal_list() here, because napi->poll() might have
6240 * rearmed the napi (napi_complete_done()) in which case it could
6241 * already be running on another CPU.
6242 */
6243 trace_napi_poll(napi, rc, budget);
6244 netpoll_poll_unlock(have_poll_lock);
6245 if (rc == budget)
6246 __busy_poll_stop(napi, skip_schedule);
6247 local_bh_enable();
6248 }
6249
6250 void napi_busy_loop(unsigned int napi_id,
6251 bool (*loop_end)(void *, unsigned long),
6252 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6253 {
6254 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6255 int (*napi_poll)(struct napi_struct *napi, int budget);
6256 void *have_poll_lock = NULL;
6257 struct napi_struct *napi;
6258
6259 restart:
6260 napi_poll = NULL;
6261
6262 rcu_read_lock();
6263
6264 napi = napi_by_id(napi_id);
6265 if (!napi)
6266 goto out;
6267
6268 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6269 preempt_disable();
6270 for (;;) {
6271 int work = 0;
6272
6273 local_bh_disable();
6274 if (!napi_poll) {
6275 unsigned long val = READ_ONCE(napi->state);
6276
6277 /* If multiple threads are competing for this napi,
6278 * we avoid dirtying napi->state as much as we can.
6279 */
6280 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6281 NAPIF_STATE_IN_BUSY_POLL)) {
6282 if (prefer_busy_poll)
6283 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6284 goto count;
6285 }
6286 if (cmpxchg(&napi->state, val,
6287 val | NAPIF_STATE_IN_BUSY_POLL |
6288 NAPIF_STATE_SCHED) != val) {
6289 if (prefer_busy_poll)
6290 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6291 goto count;
6292 }
6293 have_poll_lock = netpoll_poll_lock(napi);
6294 napi_poll = napi->poll;
6295 }
6296 work = napi_poll(napi, budget);
6297 trace_napi_poll(napi, work, budget);
6298 gro_normal_list(napi);
6299 count:
6300 if (work > 0)
6301 __NET_ADD_STATS(dev_net(napi->dev),
6302 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6303 local_bh_enable();
6304
6305 if (!loop_end || loop_end(loop_end_arg, start_time))
6306 break;
6307
6308 if (unlikely(need_resched())) {
6309 if (napi_poll)
6310 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6311 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6312 preempt_enable();
6313 rcu_read_unlock();
6314 cond_resched();
6315 if (loop_end(loop_end_arg, start_time))
6316 return;
6317 goto restart;
6318 }
6319 cpu_relax();
6320 }
6321 if (napi_poll)
6322 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6323 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6324 preempt_enable();
6325 out:
6326 rcu_read_unlock();
6327 }
6328 EXPORT_SYMBOL(napi_busy_loop);
6329
6330 #endif /* CONFIG_NET_RX_BUSY_POLL */
6331
6332 static void napi_hash_add(struct napi_struct *napi)
6333 {
6334 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6335 return;
6336
6337 spin_lock(&napi_hash_lock);
6338
6339 /* 0..NR_CPUS range is reserved for sender_cpu use */
6340 do {
6341 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6342 napi_gen_id = MIN_NAPI_ID;
6343 } while (napi_by_id(napi_gen_id));
6344 napi->napi_id = napi_gen_id;
6345
6346 hlist_add_head_rcu(&napi->napi_hash_node,
6347 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6348
6349 spin_unlock(&napi_hash_lock);
6350 }
6351
6352 /* Warning: the caller is responsible for making sure an RCU grace period
6353 * has elapsed before freeing the memory containing @napi.
6354 */
6355 static void napi_hash_del(struct napi_struct *napi)
6356 {
6357 spin_lock(&napi_hash_lock);
6358
6359 hlist_del_init_rcu(&napi->napi_hash_node);
6360
6361 spin_unlock(&napi_hash_lock);
6362 }
6363
6364 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6365 {
6366 struct napi_struct *napi;
6367
6368 napi = container_of(timer, struct napi_struct, timer);
6369
6370 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6371 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6372 */
6373 if (!napi_disable_pending(napi) &&
6374 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6375 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6376 __napi_schedule_irqoff(napi);
6377 }
6378
6379 return HRTIMER_NORESTART;
6380 }
6381
6382 static void init_gro_hash(struct napi_struct *napi)
6383 {
6384 int i;
6385
6386 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6387 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6388 napi->gro_hash[i].count = 0;
6389 }
6390 napi->gro_bitmask = 0;
6391 }
6392
6393 int dev_set_threaded(struct net_device *dev, bool threaded)
6394 {
6395 struct napi_struct *napi;
6396 int err = 0;
6397
6398 if (dev->threaded == threaded)
6399 return 0;
6400
6401 if (threaded) {
6402 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6403 if (!napi->thread) {
6404 err = napi_kthread_create(napi);
6405 if (err) {
6406 threaded = false;
6407 break;
6408 }
6409 }
6410 }
6411 }
6412
6413 dev->threaded = threaded;
6414
6415 /* Make sure kthread is created before THREADED bit
6416 * is set.
6417 */
6418 smp_mb__before_atomic();
6419
6420 /* Setting/unsetting threaded mode on a napi might not immediately
6421 * take effect, if the current napi instance is actively being
6422 * polled. In this case, the switch between threaded mode and
6423 * softirq mode will happen in the next round of napi_schedule().
6424 * This should not cause hiccups/stalls to the live traffic.
6425 */
6426 list_for_each_entry(napi, &dev->napi_list, dev_list)
6427 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6428
6429 return err;
6430 }
6431 EXPORT_SYMBOL(dev_set_threaded);
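/* Example (illustrative, not part of this file): switching a device to
 * threaded NAPI from driver or management code; the same knob is exposed to
 * user space through the per-netdev "threaded" sysfs attribute.
 */
static void example_enable_threaded_napi(struct net_device *dev)
{
	rtnl_lock();
	if (dev_set_threaded(dev, true))
		netdev_warn(dev, "failed to enable threaded NAPI\n");
	rtnl_unlock();
}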
6432
6433 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6434 int (*poll)(struct napi_struct *, int), int weight)
6435 {
6436 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6437 return;
6438
6439 INIT_LIST_HEAD(&napi->poll_list);
6440 INIT_HLIST_NODE(&napi->napi_hash_node);
6441 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6442 napi->timer.function = napi_watchdog;
6443 init_gro_hash(napi);
6444 napi->skb = NULL;
6445 INIT_LIST_HEAD(&napi->rx_list);
6446 napi->rx_count = 0;
6447 napi->poll = poll;
6448 if (weight > NAPI_POLL_WEIGHT)
6449 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6450 weight);
6451 napi->weight = weight;
6452 napi->dev = dev;
6453 #ifdef CONFIG_NETPOLL
6454 napi->poll_owner = -1;
6455 #endif
6456 napi->list_owner = -1;
6457 set_bit(NAPI_STATE_SCHED, &napi->state);
6458 set_bit(NAPI_STATE_NPSVC, &napi->state);
6459 list_add_rcu(&napi->dev_list, &dev->napi_list);
6460 napi_hash_add(napi);
6461 napi_get_frags_check(napi);
6462 /* Create kthread for this napi if dev->threaded is set.
6463 * Clear dev->threaded if kthread creation failed so that
6464 * threaded mode will not be enabled in napi_enable().
6465 */
6466 if (dev->threaded && napi_kthread_create(napi))
6467 dev->threaded = 0;
6468 }
6469 EXPORT_SYMBOL(netif_napi_add_weight);
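/* Example (illustrative, not part of this file): registering a NAPI context
 * at probe time. Drivers normally use netif_napi_add(), which passes the
 * default NAPI_POLL_WEIGHT to this function. example_card and
 * example_card_poll() are hypothetical.
 */
struct example_card {
	struct napi_struct napi;
};

static int example_card_poll(struct napi_struct *napi, int budget);

static void example_card_setup_napi(struct net_device *dev,
				    struct example_card *card)
{
	netif_napi_add_weight(dev, &card->napi, example_card_poll,
			      NAPI_POLL_WEIGHT);
	/* the instance starts with SCHED/NPSVC set, i.e. effectively disabled;
	 * napi_enable() in ndo_open makes it schedulable.
	 */
}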
6470
6471 void napi_disable(struct napi_struct *n)
6472 {
6473 unsigned long val, new;
6474
6475 might_sleep();
6476 set_bit(NAPI_STATE_DISABLE, &n->state);
6477
6478 val = READ_ONCE(n->state);
6479 do {
6480 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6481 usleep_range(20, 200);
6482 val = READ_ONCE(n->state);
6483 }
6484
6485 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6486 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6487 } while (!try_cmpxchg(&n->state, &val, new));
6488
6489 hrtimer_cancel(&n->timer);
6490
6491 clear_bit(NAPI_STATE_DISABLE, &n->state);
6492 }
6493 EXPORT_SYMBOL(napi_disable);
6494
6495 /**
6496 * napi_enable - enable NAPI scheduling
6497 * @n: NAPI context
6498 *
6499 * Resume NAPI from being scheduled on this context.
6500 * Must be paired with napi_disable.
6501 */
6502 void napi_enable(struct napi_struct *n)
6503 {
6504 unsigned long new, val = READ_ONCE(n->state);
6505
6506 do {
6507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6508
6509 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6510 if (n->dev->threaded && n->thread)
6511 new |= NAPIF_STATE_THREADED;
6512 } while (!try_cmpxchg(&n->state, &val, new));
6513 }
6514 EXPORT_SYMBOL(napi_enable);
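/* Example (illustrative, not part of this file): the usual napi_enable() /
 * napi_disable() pairing in a driver's ndo_open and ndo_stop callbacks.
 * example_adapter is hypothetical.
 */
struct example_adapter {
	struct napi_struct napi;
};

static int example_adapter_open(struct net_device *dev)
{
	struct example_adapter *adapter = netdev_priv(dev);

	napi_enable(&adapter->napi);
	/* ... start queues and unmask device interrupts ... */
	return 0;
}

static int example_adapter_stop(struct net_device *dev)
{
	struct example_adapter *adapter = netdev_priv(dev);

	/* ... mask interrupts and stop queues ... */
	napi_disable(&adapter->napi);	/* may sleep; waits out a running poll */
	return 0;
}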
6515
6516 static void flush_gro_hash(struct napi_struct *napi)
6517 {
6518 int i;
6519
6520 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6521 struct sk_buff *skb, *n;
6522
6523 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6524 kfree_skb(skb);
6525 napi->gro_hash[i].count = 0;
6526 }
6527 }
6528
6529 /* Must be called in process context */
6530 void __netif_napi_del(struct napi_struct *napi)
6531 {
6532 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6533 return;
6534
6535 napi_hash_del(napi);
6536 list_del_rcu(&napi->dev_list);
6537 napi_free_frags(napi);
6538
6539 flush_gro_hash(napi);
6540 napi->gro_bitmask = 0;
6541
6542 if (napi->thread) {
6543 kthread_stop(napi->thread);
6544 napi->thread = NULL;
6545 }
6546 }
6547 EXPORT_SYMBOL(__netif_napi_del);
6548
6549 static int __napi_poll(struct napi_struct *n, bool *repoll)
6550 {
6551 int work, weight;
6552
6553 weight = n->weight;
6554
6555 /* This NAPI_STATE_SCHED test is for avoiding a race
6556 * with netpoll's poll_napi(). Only the entity which
6557 * obtains the lock and sees NAPI_STATE_SCHED set will
6558 * actually make the ->poll() call. Therefore we avoid
6559 * accidentally calling ->poll() when NAPI is not scheduled.
6560 */
6561 work = 0;
6562 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6563 work = n->poll(n, weight);
6564 trace_napi_poll(n, work, weight);
6565 }
6566
6567 if (unlikely(work > weight))
6568 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6569 n->poll, work, weight);
6570
6571 if (likely(work < weight))
6572 return work;
6573
6574 /* Drivers must not modify the NAPI state if they
6575 * consume the entire weight. In such cases this code
6576 * still "owns" the NAPI instance and therefore can
6577 * move the instance around on the list at-will.
6578 */
6579 if (unlikely(napi_disable_pending(n))) {
6580 napi_complete(n);
6581 return work;
6582 }
6583
6584 /* The NAPI context has more processing work, but busy-polling
6585 * is preferred. Exit early.
6586 */
6587 if (napi_prefer_busy_poll(n)) {
6588 if (napi_complete_done(n, work)) {
6589 /* If timeout is not set, we need to make sure
6590 * that the NAPI is re-scheduled.
6591 */
6592 napi_schedule(n);
6593 }
6594 return work;
6595 }
6596
6597 if (n->gro_bitmask) {
6598 /* flush too old packets
6599 * If HZ < 1000, flush all packets.
6600 */
6601 napi_gro_flush(n, HZ >= 1000);
6602 }
6603
6604 gro_normal_list(n);
6605
6606 /* Some drivers may have called napi_schedule
6607 * prior to exhausting their budget.
6608 */
6609 if (unlikely(!list_empty(&n->poll_list))) {
6610 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6611 n->dev ? n->dev->name : "backlog");
6612 return work;
6613 }
6614
6615 *repoll = true;
6616
6617 return work;
6618 }
6619
6620 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6621 {
6622 bool do_repoll = false;
6623 void *have;
6624 int work;
6625
6626 list_del_init(&n->poll_list);
6627
6628 have = netpoll_poll_lock(n);
6629
6630 work = __napi_poll(n, &do_repoll);
6631
6632 if (do_repoll)
6633 list_add_tail(&n->poll_list, repoll);
6634
6635 netpoll_poll_unlock(have);
6636
6637 return work;
6638 }
6639
6640 static int napi_thread_wait(struct napi_struct *napi)
6641 {
6642 bool woken = false;
6643
6644 set_current_state(TASK_INTERRUPTIBLE);
6645
6646 while (!kthread_should_stop()) {
6647 /* Testing SCHED_THREADED bit here to make sure the current
6648 * kthread owns this napi and could poll on this napi.
6649 * Testing SCHED bit is not enough because SCHED bit might be
6650 * set by some other busy poll thread or by napi_disable().
6651 */
6652 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6653 WARN_ON(!list_empty(&napi->poll_list));
6654 __set_current_state(TASK_RUNNING);
6655 return 0;
6656 }
6657
6658 schedule();
6659 /* woken being true indicates this thread owns this napi. */
6660 woken = true;
6661 set_current_state(TASK_INTERRUPTIBLE);
6662 }
6663 __set_current_state(TASK_RUNNING);
6664
6665 return -1;
6666 }
6667
6668 static void skb_defer_free_flush(struct softnet_data *sd)
6669 {
6670 struct sk_buff *skb, *next;
6671
6672 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6673 if (!READ_ONCE(sd->defer_list))
6674 return;
6675
6676 spin_lock(&sd->defer_lock);
6677 skb = sd->defer_list;
6678 sd->defer_list = NULL;
6679 sd->defer_count = 0;
6680 spin_unlock(&sd->defer_lock);
6681
6682 while (skb != NULL) {
6683 next = skb->next;
6684 napi_consume_skb(skb, 1);
6685 skb = next;
6686 }
6687 }
6688
6689 static int napi_threaded_poll(void *data)
6690 {
6691 struct napi_struct *napi = data;
6692 struct softnet_data *sd;
6693 void *have;
6694
6695 while (!napi_thread_wait(napi)) {
6696 unsigned long last_qs = jiffies;
6697
6698 for (;;) {
6699 bool repoll = false;
6700
6701 local_bh_disable();
6702 sd = this_cpu_ptr(&softnet_data);
6703 sd->in_napi_threaded_poll = true;
6704
6705 have = netpoll_poll_lock(napi);
6706 __napi_poll(napi, &repoll);
6707 netpoll_poll_unlock(have);
6708
6709 sd->in_napi_threaded_poll = false;
6710 barrier();
6711
6712 if (sd_has_rps_ipi_waiting(sd)) {
6713 local_irq_disable();
6714 net_rps_action_and_irq_enable(sd);
6715 }
6716 skb_defer_free_flush(sd);
6717 local_bh_enable();
6718
6719 if (!repoll)
6720 break;
6721
6722 rcu_softirq_qs_periodic(last_qs);
6723 cond_resched();
6724 }
6725 }
6726 return 0;
6727 }
6728
6729 static __latent_entropy void net_rx_action(struct softirq_action *h)
6730 {
6731 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6732 unsigned long time_limit = jiffies +
6733 usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
6734 int budget = READ_ONCE(netdev_budget);
6735 LIST_HEAD(list);
6736 LIST_HEAD(repoll);
6737
6738 start:
6739 sd->in_net_rx_action = true;
6740 local_irq_disable();
6741 list_splice_init(&sd->poll_list, &list);
6742 local_irq_enable();
6743
6744 for (;;) {
6745 struct napi_struct *n;
6746
6747 skb_defer_free_flush(sd);
6748
6749 if (list_empty(&list)) {
6750 if (list_empty(&repoll)) {
6751 sd->in_net_rx_action = false;
6752 barrier();
6753 /* We need to check if ____napi_schedule()
6754 * had refilled poll_list while
6755 * sd->in_net_rx_action was true.
6756 */
6757 if (!list_empty(&sd->poll_list))
6758 goto start;
6759 if (!sd_has_rps_ipi_waiting(sd))
6760 goto end;
6761 }
6762 break;
6763 }
6764
6765 n = list_first_entry(&list, struct napi_struct, poll_list);
6766 budget -= napi_poll(n, &repoll);
6767
6768 /* If softirq window is exhausted then punt.
6769 * Allow this to run for 2 jiffies, which allows
6770 * an average latency of 1.5/HZ.
6771 */
6772 if (unlikely(budget <= 0 ||
6773 time_after_eq(jiffies, time_limit))) {
6774 sd->time_squeeze++;
6775 break;
6776 }
6777 }
6778
6779 local_irq_disable();
6780
6781 list_splice_tail_init(&sd->poll_list, &list);
6782 list_splice_tail(&repoll, &list);
6783 list_splice(&list, &sd->poll_list);
6784 if (!list_empty(&sd->poll_list))
6785 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6786 else
6787 sd->in_net_rx_action = false;
6788
6789 net_rps_action_and_irq_enable(sd);
6790 end:;
6791 }
6792
6793 struct netdev_adjacent {
6794 struct net_device *dev;
6795 netdevice_tracker dev_tracker;
6796
6797 /* upper master flag, there can only be one master device per list */
6798 bool master;
6799
6800 /* lookup ignore flag */
6801 bool ignore;
6802
6803 /* counter for the number of times this device was added to us */
6804 u16 ref_nr;
6805
6806 /* private field for the users */
6807 void *private;
6808
6809 struct list_head list;
6810 struct rcu_head rcu;
6811 };
6812
6813 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6814 struct list_head *adj_list)
6815 {
6816 struct netdev_adjacent *adj;
6817
6818 list_for_each_entry(adj, adj_list, list) {
6819 if (adj->dev == adj_dev)
6820 return adj;
6821 }
6822 return NULL;
6823 }
6824
6825 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6826 struct netdev_nested_priv *priv)
6827 {
6828 struct net_device *dev = (struct net_device *)priv->data;
6829
6830 return upper_dev == dev;
6831 }
6832
6833 /**
6834 * netdev_has_upper_dev - Check if device is linked to an upper device
6835 * @dev: device
6836 * @upper_dev: upper device to check
6837 *
6838 * Find out if a device is linked to the specified upper device and return true
6839 * in case it is. Note that this walks the complete stack of upper devices,
6840 * not only the immediate upper device. The caller must hold the RTNL lock.
6841 */
6842 bool netdev_has_upper_dev(struct net_device *dev,
6843 struct net_device *upper_dev)
6844 {
6845 struct netdev_nested_priv priv = {
6846 .data = (void *)upper_dev,
6847 };
6848
6849 ASSERT_RTNL();
6850
6851 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6852 &priv);
6853 }
6854 EXPORT_SYMBOL(netdev_has_upper_dev);
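/* Example (illustrative, not part of this file): rejecting a link that would
 * create a cycle in the device graph before enslaving @slave under @master,
 * relying on the whole-chain check above. The helper name is hypothetical.
 */
static int example_check_no_loop(struct net_device *master,
				 struct net_device *slave)
{
	ASSERT_RTNL();
	/* if the prospective slave is already stacked above the master,
	 * linking them the other way round would form a loop
	 */
	if (netdev_has_upper_dev(master, slave))
		return -ELOOP;
	return 0;
}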
6855
6856 /**
6857 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6858 * @dev: device
6859 * @upper_dev: upper device to check
6860 *
6861 * Find out if a device is linked to the specified upper device and return true
6862 * in case it is. Note that this checks the entire upper device chain.
6863 * The caller must hold the RCU read lock.
6864 */
6865
6866 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6867 struct net_device *upper_dev)
6868 {
6869 struct netdev_nested_priv priv = {
6870 .data = (void *)upper_dev,
6871 };
6872
6873 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6874 &priv);
6875 }
6876 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6877
6878 /**
6879 * netdev_has_any_upper_dev - Check if device is linked to some device
6880 * @dev: device
6881 *
6882 * Find out if a device is linked to an upper device and return true in case
6883 * it is. The caller must hold the RTNL lock.
6884 */
6885 bool netdev_has_any_upper_dev(struct net_device *dev)
6886 {
6887 ASSERT_RTNL();
6888
6889 return !list_empty(&dev->adj_list.upper);
6890 }
6891 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6892
6893 /**
6894 * netdev_master_upper_dev_get - Get master upper device
6895 * @dev: device
6896 *
6897 * Find a master upper device and return a pointer to it, or NULL in case
6898 * it's not there. The caller must hold the RTNL lock.
6899 */
6900 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6901 {
6902 struct netdev_adjacent *upper;
6903
6904 ASSERT_RTNL();
6905
6906 if (list_empty(&dev->adj_list.upper))
6907 return NULL;
6908
6909 upper = list_first_entry(&dev->adj_list.upper,
6910 struct netdev_adjacent, list);
6911 if (likely(upper->master))
6912 return upper->dev;
6913 return NULL;
6914 }
6915 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6916
6917 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6918 {
6919 struct netdev_adjacent *upper;
6920
6921 ASSERT_RTNL();
6922
6923 if (list_empty(&dev->adj_list.upper))
6924 return NULL;
6925
6926 upper = list_first_entry(&dev->adj_list.upper,
6927 struct netdev_adjacent, list);
6928 if (likely(upper->master) && !upper->ignore)
6929 return upper->dev;
6930 return NULL;
6931 }
6932
6933 /**
6934 * netdev_has_any_lower_dev - Check if device is linked to some device
6935 * @dev: device
6936 *
6937 * Find out if a device is linked to a lower device and return true in case
6938 * it is. The caller must hold the RTNL lock.
6939 */
6940 static bool netdev_has_any_lower_dev(struct net_device *dev)
6941 {
6942 ASSERT_RTNL();
6943
6944 return !list_empty(&dev->adj_list.lower);
6945 }
6946
6947 void *netdev_adjacent_get_private(struct list_head *adj_list)
6948 {
6949 struct netdev_adjacent *adj;
6950
6951 adj = list_entry(adj_list, struct netdev_adjacent, list);
6952
6953 return adj->private;
6954 }
6955 EXPORT_SYMBOL(netdev_adjacent_get_private);
6956
6957 /**
6958 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6959 * @dev: device
6960 * @iter: list_head ** of the current position
6961 *
6962 * Gets the next device from the dev's upper list, starting from iter
6963 * position. The caller must hold RCU read lock.
6964 */
6965 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6966 struct list_head **iter)
6967 {
6968 struct netdev_adjacent *upper;
6969
6970 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6971
6972 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6973
6974 if (&upper->list == &dev->adj_list.upper)
6975 return NULL;
6976
6977 *iter = &upper->list;
6978
6979 return upper->dev;
6980 }
6981 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6982
6983 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6984 struct list_head **iter,
6985 bool *ignore)
6986 {
6987 struct netdev_adjacent *upper;
6988
6989 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6990
6991 if (&upper->list == &dev->adj_list.upper)
6992 return NULL;
6993
6994 *iter = &upper->list;
6995 *ignore = upper->ignore;
6996
6997 return upper->dev;
6998 }
6999
7000 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7001 struct list_head **iter)
7002 {
7003 struct netdev_adjacent *upper;
7004
7005 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7006
7007 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7008
7009 if (&upper->list == &dev->adj_list.upper)
7010 return NULL;
7011
7012 *iter = &upper->list;
7013
7014 return upper->dev;
7015 }
7016
7017 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7018 int (*fn)(struct net_device *dev,
7019 struct netdev_nested_priv *priv),
7020 struct netdev_nested_priv *priv)
7021 {
7022 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7023 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7024 int ret, cur = 0;
7025 bool ignore;
7026
7027 now = dev;
7028 iter = &dev->adj_list.upper;
7029
7030 while (1) {
7031 if (now != dev) {
7032 ret = fn(now, priv);
7033 if (ret)
7034 return ret;
7035 }
7036
7037 next = NULL;
7038 while (1) {
7039 udev = __netdev_next_upper_dev(now, &iter, &ignore);
7040 if (!udev)
7041 break;
7042 if (ignore)
7043 continue;
7044
7045 next = udev;
7046 niter = &udev->adj_list.upper;
7047 dev_stack[cur] = now;
7048 iter_stack[cur++] = iter;
7049 break;
7050 }
7051
7052 if (!next) {
7053 if (!cur)
7054 return 0;
7055 next = dev_stack[--cur];
7056 niter = iter_stack[cur];
7057 }
7058
7059 now = next;
7060 iter = niter;
7061 }
7062
7063 return 0;
7064 }
7065
7066 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7067 int (*fn)(struct net_device *dev,
7068 struct netdev_nested_priv *priv),
7069 struct netdev_nested_priv *priv)
7070 {
7071 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7072 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7073 int ret, cur = 0;
7074
7075 now = dev;
7076 iter = &dev->adj_list.upper;
7077
7078 while (1) {
7079 if (now != dev) {
7080 ret = fn(now, priv);
7081 if (ret)
7082 return ret;
7083 }
7084
7085 next = NULL;
7086 while (1) {
7087 udev = netdev_next_upper_dev_rcu(now, &iter);
7088 if (!udev)
7089 break;
7090
7091 next = udev;
7092 niter = &udev->adj_list.upper;
7093 dev_stack[cur] = now;
7094 iter_stack[cur++] = iter;
7095 break;
7096 }
7097
7098 if (!next) {
7099 if (!cur)
7100 return 0;
7101 next = dev_stack[--cur];
7102 niter = iter_stack[cur];
7103 }
7104
7105 now = next;
7106 iter = niter;
7107 }
7108
7109 return 0;
7110 }
7111 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
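/* Example (illustrative, not part of this file): counting every device
 * stacked above @dev by walking the full upper graph. The callback returns 0
 * so the walk visits all nodes; any non-zero return value stops the walk and
 * is propagated. The caller must hold the RCU read lock or RTNL.
 */
static int example_count_one_upper(struct net_device *upper,
				   struct netdev_nested_priv *priv)
{
	(*(unsigned int *)priv->data)++;
	return 0;
}

static unsigned int example_count_uppers(struct net_device *dev)
{
	unsigned int count = 0;
	struct netdev_nested_priv priv = { .data = &count };

	netdev_walk_all_upper_dev_rcu(dev, example_count_one_upper, &priv);
	return count;
}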
7112
7113 static bool __netdev_has_upper_dev(struct net_device *dev,
7114 struct net_device *upper_dev)
7115 {
7116 struct netdev_nested_priv priv = {
7117 .flags = 0,
7118 .data = (void *)upper_dev,
7119 };
7120
7121 ASSERT_RTNL();
7122
7123 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7124 &priv);
7125 }
7126
7127 /**
7128 * netdev_lower_get_next_private - Get the next ->private from the
7129 * lower neighbour list
7130 * @dev: device
7131 * @iter: list_head ** of the current position
7132 *
7133 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7134 * list, starting from iter position. The caller must either hold the
7135 * RTNL lock or its own locking that guarantees that the neighbour lower
7136 * list will remain unchanged.
7137 */
7138 void *netdev_lower_get_next_private(struct net_device *dev,
7139 struct list_head **iter)
7140 {
7141 struct netdev_adjacent *lower;
7142
7143 lower = list_entry(*iter, struct netdev_adjacent, list);
7144
7145 if (&lower->list == &dev->adj_list.lower)
7146 return NULL;
7147
7148 *iter = lower->list.next;
7149
7150 return lower->private;
7151 }
7152 EXPORT_SYMBOL(netdev_lower_get_next_private);
7153
7154 /**
7155 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7156 * lower neighbour list, RCU
7157 * variant
7158 * @dev: device
7159 * @iter: list_head ** of the current position
7160 *
7161 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7162 * list, starting from iter position. The caller must hold RCU read lock.
7163 */
7164 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7165 struct list_head **iter)
7166 {
7167 struct netdev_adjacent *lower;
7168
7169 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7170
7171 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7172
7173 if (&lower->list == &dev->adj_list.lower)
7174 return NULL;
7175
7176 *iter = &lower->list;
7177
7178 return lower->private;
7179 }
7180 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7181
7182 /**
7183 * netdev_lower_get_next - Get the next device from the lower neighbour
7184 * list
7185 * @dev: device
7186 * @iter: list_head ** of the current position
7187 *
7188 * Gets the next netdev_adjacent from the dev's lower neighbour
7189 * list, starting from iter position. The caller must hold RTNL lock or
7190 * its own locking that guarantees that the neighbour lower
7191 * list will remain unchanged.
7192 */
7193 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7194 {
7195 struct netdev_adjacent *lower;
7196
7197 lower = list_entry(*iter, struct netdev_adjacent, list);
7198
7199 if (&lower->list == &dev->adj_list.lower)
7200 return NULL;
7201
7202 *iter = lower->list.next;
7203
7204 return lower->dev;
7205 }
7206 EXPORT_SYMBOL(netdev_lower_get_next);
7207
7208 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7209 struct list_head **iter)
7210 {
7211 struct netdev_adjacent *lower;
7212
7213 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7214
7215 if (&lower->list == &dev->adj_list.lower)
7216 return NULL;
7217
7218 *iter = &lower->list;
7219
7220 return lower->dev;
7221 }
7222
7223 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7224 struct list_head **iter,
7225 bool *ignore)
7226 {
7227 struct netdev_adjacent *lower;
7228
7229 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7230
7231 if (&lower->list == &dev->adj_list.lower)
7232 return NULL;
7233
7234 *iter = &lower->list;
7235 *ignore = lower->ignore;
7236
7237 return lower->dev;
7238 }
7239
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					struct netdev_nested_priv *priv),
			      struct netdev_nested_priv *priv)
7244 {
7245 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7246 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7247 int ret, cur = 0;
7248
7249 now = dev;
7250 iter = &dev->adj_list.lower;
7251
7252 while (1) {
7253 if (now != dev) {
7254 ret = fn(now, priv);
7255 if (ret)
7256 return ret;
7257 }
7258
7259 next = NULL;
7260 while (1) {
7261 ldev = netdev_next_lower_dev(now, &iter);
7262 if (!ldev)
7263 break;
7264
7265 next = ldev;
7266 niter = &ldev->adj_list.lower;
7267 dev_stack[cur] = now;
7268 iter_stack[cur++] = iter;
7269 break;
7270 }
7271
7272 if (!next) {
7273 if (!cur)
7274 return 0;
7275 next = dev_stack[--cur];
7276 niter = iter_stack[cur];
7277 }
7278
7279 now = next;
7280 iter = niter;
7281 }
7282
7283 return 0;
7284 }
7285 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
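/*
 * Example (editor's sketch): counting the whole lower hierarchy with
 * netdev_walk_all_lower_dev(). The callback sees each nested lower device
 * once; returning a non-zero value stops the walk and is propagated back
 * to the caller.
 *
 *	static int count_lower(struct net_device *ldev,
 *			       struct netdev_nested_priv *priv)
 *	{
 *		(*(unsigned int *)priv->data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	netdev_walk_all_lower_dev(dev, count_lower, &priv);
 */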
7286
static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
7291 {
7292 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7293 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7294 int ret, cur = 0;
7295 bool ignore;
7296
7297 now = dev;
7298 iter = &dev->adj_list.lower;
7299
7300 while (1) {
7301 if (now != dev) {
7302 ret = fn(now, priv);
7303 if (ret)
7304 return ret;
7305 }
7306
7307 next = NULL;
7308 while (1) {
7309 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7310 if (!ldev)
7311 break;
7312 if (ignore)
7313 continue;
7314
7315 next = ldev;
7316 niter = &ldev->adj_list.lower;
7317 dev_stack[cur] = now;
7318 iter_stack[cur++] = iter;
7319 break;
7320 }
7321
7322 if (!next) {
7323 if (!cur)
7324 return 0;
7325 next = dev_stack[--cur];
7326 niter = iter_stack[cur];
7327 }
7328
7329 now = next;
7330 iter = niter;
7331 }
7332
7333 return 0;
7334 }
7335
struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
					     struct list_head **iter)
7338 {
7339 struct netdev_adjacent *lower;
7340
7341 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7342 if (&lower->list == &dev->adj_list.lower)
7343 return NULL;
7344
7345 *iter = &lower->list;
7346
7347 return lower->dev;
7348 }
7349 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7350
static u8 __netdev_upper_depth(struct net_device *dev)
7352 {
7353 struct net_device *udev;
7354 struct list_head *iter;
7355 u8 max_depth = 0;
7356 bool ignore;
7357
7358 for (iter = &dev->adj_list.upper,
7359 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7360 udev;
7361 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7362 if (ignore)
7363 continue;
7364 if (max_depth < udev->upper_level)
7365 max_depth = udev->upper_level;
7366 }
7367
7368 return max_depth;
7369 }
7370
static u8 __netdev_lower_depth(struct net_device *dev)
7372 {
7373 struct net_device *ldev;
7374 struct list_head *iter;
7375 u8 max_depth = 0;
7376 bool ignore;
7377
7378 for (iter = &dev->adj_list.lower,
7379 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7380 ldev;
7381 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7382 if (ignore)
7383 continue;
7384 if (max_depth < ldev->lower_level)
7385 max_depth = ldev->lower_level;
7386 }
7387
7388 return max_depth;
7389 }
7390
static int __netdev_update_upper_level(struct net_device *dev,
				       struct netdev_nested_priv *__unused)
7393 {
7394 dev->upper_level = __netdev_upper_depth(dev) + 1;
7395 return 0;
7396 }
7397
7398 #ifdef CONFIG_LOCKDEP
7399 static LIST_HEAD(net_unlink_list);
7400
static void net_unlink_todo(struct net_device *dev)
7402 {
7403 if (list_empty(&dev->unlink_list))
7404 list_add_tail(&dev->unlink_list, &net_unlink_list);
7405 }
7406 #endif
7407
static int __netdev_update_lower_level(struct net_device *dev,
				       struct netdev_nested_priv *priv)
7410 {
7411 dev->lower_level = __netdev_lower_depth(dev) + 1;
7412
7413 #ifdef CONFIG_LOCKDEP
7414 if (!priv)
7415 return 0;
7416
7417 if (priv->flags & NESTED_SYNC_IMM)
7418 dev->nested_level = dev->lower_level - 1;
7419 if (priv->flags & NESTED_SYNC_TODO)
7420 net_unlink_todo(dev);
7421 #endif
7422 return 0;
7423 }
7424
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
7429 {
7430 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7431 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7432 int ret, cur = 0;
7433
7434 now = dev;
7435 iter = &dev->adj_list.lower;
7436
7437 while (1) {
7438 if (now != dev) {
7439 ret = fn(now, priv);
7440 if (ret)
7441 return ret;
7442 }
7443
7444 next = NULL;
7445 while (1) {
7446 ldev = netdev_next_lower_dev_rcu(now, &iter);
7447 if (!ldev)
7448 break;
7449
7450 next = ldev;
7451 niter = &ldev->adj_list.lower;
7452 dev_stack[cur] = now;
7453 iter_stack[cur++] = iter;
7454 break;
7455 }
7456
7457 if (!next) {
7458 if (!cur)
7459 return 0;
7460 next = dev_stack[--cur];
7461 niter = iter_stack[cur];
7462 }
7463
7464 now = next;
7465 iter = niter;
7466 }
7467
7468 return 0;
7469 }
7470 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
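/*
 * Example (editor's sketch): the RCU variant may be used from contexts that
 * cannot take RTNL, as long as the caller stays inside an RCU read section
 * for the whole walk:
 *
 *	rcu_read_lock();
 *	ret = netdev_walk_all_lower_dev_rcu(dev, count_lower, &priv);
 *	rcu_read_unlock();
 *
 * (count_lower/priv as in the sketch after netdev_walk_all_lower_dev()
 * above; the callback itself must be RCU-safe.)
 */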
7471
7472 /**
7473 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7474 * lower neighbour list, RCU
7475 * variant
7476 * @dev: device
7477 *
7478 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7479 * list. The caller must hold RCU read lock.
7480 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7482 {
7483 struct netdev_adjacent *lower;
7484
7485 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7486 struct netdev_adjacent, list);
7487 if (lower)
7488 return lower->private;
7489 return NULL;
7490 }
7491 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7492
7493 /**
7494 * netdev_master_upper_dev_get_rcu - Get master upper device
7495 * @dev: device
7496 *
7497 * Find a master upper device and return pointer to it or NULL in case
7498 * it's not there. The caller must hold the RCU read lock.
7499 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7501 {
7502 struct netdev_adjacent *upper;
7503
7504 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7505 struct netdev_adjacent, list);
7506 if (upper && likely(upper->master))
7507 return upper->dev;
7508 return NULL;
7509 }
7510 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
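/*
 * Example (editor's sketch): looking up a slave's master without RTNL. No
 * reference is taken, so the returned pointer may only be used inside the
 * RCU read section:
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(slave_dev);
 *	if (master)
 *		netdev_dbg(slave_dev, "enslaved to %s\n", master->name);
 *	rcu_read_unlock();
 */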
7511
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
7515 {
7516 char linkname[IFNAMSIZ+7];
7517
7518 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7519 "upper_%s" : "lower_%s", adj_dev->name);
7520 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7521 linkname);
7522 }
static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
7526 {
7527 char linkname[IFNAMSIZ+7];
7528
7529 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7530 "upper_%s" : "lower_%s", name);
7531 sysfs_remove_link(&(dev->dev.kobj), linkname);
7532 }
7533
static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
7537 {
7538 return (dev_list == &dev->adj_list.upper ||
7539 dev_list == &dev->adj_list.lower) &&
7540 net_eq(dev_net(dev), dev_net(adj_dev));
7541 }
7542
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
7547 {
7548 struct netdev_adjacent *adj;
7549 int ret;
7550
7551 adj = __netdev_find_adj(adj_dev, dev_list);
7552
7553 if (adj) {
7554 adj->ref_nr += 1;
7555 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7556 dev->name, adj_dev->name, adj->ref_nr);
7557
7558 return 0;
7559 }
7560
7561 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7562 if (!adj)
7563 return -ENOMEM;
7564
7565 adj->dev = adj_dev;
7566 adj->master = master;
7567 adj->ref_nr = 1;
7568 adj->private = private;
7569 adj->ignore = false;
7570 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7571
7572 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7573 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7574
7575 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7576 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7577 if (ret)
7578 goto free_adj;
7579 }
7580
7581 /* Ensure that master link is always the first item in list. */
7582 if (master) {
7583 ret = sysfs_create_link(&(dev->dev.kobj),
7584 &(adj_dev->dev.kobj), "master");
7585 if (ret)
7586 goto remove_symlinks;
7587
7588 list_add_rcu(&adj->list, dev_list);
7589 } else {
7590 list_add_tail_rcu(&adj->list, dev_list);
7591 }
7592
7593 return 0;
7594
7595 remove_symlinks:
7596 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7597 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7598 free_adj:
7599 netdev_put(adj_dev, &adj->dev_tracker);
7600 kfree(adj);
7601
7602 return ret;
7603 }
7604
static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
7609 {
7610 struct netdev_adjacent *adj;
7611
7612 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7613 dev->name, adj_dev->name, ref_nr);
7614
7615 adj = __netdev_find_adj(adj_dev, dev_list);
7616
7617 if (!adj) {
7618 pr_err("Adjacency does not exist for device %s from %s\n",
7619 dev->name, adj_dev->name);
7620 WARN_ON(1);
7621 return;
7622 }
7623
7624 if (adj->ref_nr > ref_nr) {
7625 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7626 dev->name, adj_dev->name, ref_nr,
7627 adj->ref_nr - ref_nr);
7628 adj->ref_nr -= ref_nr;
7629 return;
7630 }
7631
7632 if (adj->master)
7633 sysfs_remove_link(&(dev->dev.kobj), "master");
7634
7635 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7636 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7637
7638 list_del_rcu(&adj->list);
7639 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7640 adj_dev->name, dev->name, adj_dev->name);
7641 netdev_put(adj_dev, &adj->dev_tracker);
7642 kfree_rcu(adj, rcu);
7643 }
7644
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
7650 {
7651 int ret;
7652
7653 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7654 private, master);
7655 if (ret)
7656 return ret;
7657
7658 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7659 private, false);
7660 if (ret) {
7661 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7662 return ret;
7663 }
7664
7665 return 0;
7666 }
7667
static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
7673 {
7674 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7675 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7676 }
7677
static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
7681 {
7682 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7683 &dev->adj_list.upper,
7684 &upper_dev->adj_list.lower,
7685 private, master);
7686 }
7687
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
7690 {
7691 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7692 &dev->adj_list.upper,
7693 &upper_dev->adj_list.lower);
7694 }
7695
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netdev_nested_priv *priv,
				   struct netlink_ext_ack *extack)
7701 {
7702 struct netdev_notifier_changeupper_info changeupper_info = {
7703 .info = {
7704 .dev = dev,
7705 .extack = extack,
7706 },
7707 .upper_dev = upper_dev,
7708 .master = master,
7709 .linking = true,
7710 .upper_info = upper_info,
7711 };
7712 struct net_device *master_dev;
7713 int ret = 0;
7714
7715 ASSERT_RTNL();
7716
7717 if (dev == upper_dev)
7718 return -EBUSY;
7719
7720 /* To prevent loops, check if dev is not upper device to upper_dev. */
7721 if (__netdev_has_upper_dev(upper_dev, dev))
7722 return -EBUSY;
7723
7724 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7725 return -EMLINK;
7726
7727 if (!master) {
7728 if (__netdev_has_upper_dev(dev, upper_dev))
7729 return -EEXIST;
7730 } else {
7731 master_dev = __netdev_master_upper_dev_get(dev);
7732 if (master_dev)
7733 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7734 }
7735
7736 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7737 &changeupper_info.info);
7738 ret = notifier_to_errno(ret);
7739 if (ret)
7740 return ret;
7741
7742 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7743 master);
7744 if (ret)
7745 return ret;
7746
7747 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7748 &changeupper_info.info);
7749 ret = notifier_to_errno(ret);
7750 if (ret)
7751 goto rollback;
7752
7753 __netdev_update_upper_level(dev, NULL);
7754 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7755
7756 __netdev_update_lower_level(upper_dev, priv);
7757 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7758 priv);
7759
7760 return 0;
7761
7762 rollback:
7763 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7764
7765 return ret;
7766 }
7767
7768 /**
7769 * netdev_upper_dev_link - Add a link to the upper device
7770 * @dev: device
7771 * @upper_dev: new upper device
7772 * @extack: netlink extended ack
7773 *
7774 * Adds a link to device which is upper to this one. The caller must hold
7775 * the RTNL lock. On a failure a negative errno code is returned.
7776 * On success the reference counts are adjusted and the function
7777 * returns zero.
7778 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
7782 {
7783 struct netdev_nested_priv priv = {
7784 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7785 .data = NULL,
7786 };
7787
7788 return __netdev_upper_dev_link(dev, upper_dev, false,
7789 NULL, NULL, &priv, extack);
7790 }
7791 EXPORT_SYMBOL(netdev_upper_dev_link);
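/*
 * Example (editor's sketch): a stacked driver (a VLAN-like upper, say)
 * typically links itself on top of its real device when it is created and
 * removes the link again on teardown, both under RTNL. real_dev and
 * stacked_dev are hypothetical names for the lower and upper devices:
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(real_dev, stacked_dev, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(real_dev, stacked_dev);
 */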
7792
7793 /**
7794 * netdev_master_upper_dev_link - Add a master link to the upper device
7795 * @dev: device
7796 * @upper_dev: new upper device
7797 * @upper_priv: upper device private
7798 * @upper_info: upper info to be passed down via notifier
7799 * @extack: netlink extended ack
7800 *
7801 * Adds a link to device which is upper to this one. In this case, only
7802 * one master upper device can be linked, although other non-master devices
7803 * might be linked as well. The caller must hold the RTNL lock.
7804 * On a failure a negative errno code is returned. On success the reference
7805 * counts are adjusted and the function returns zero.
7806 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
7811 {
7812 struct netdev_nested_priv priv = {
7813 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7814 .data = NULL,
7815 };
7816
7817 return __netdev_upper_dev_link(dev, upper_dev, true,
7818 upper_priv, upper_info, &priv, extack);
7819 }
7820 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7821
static void __netdev_upper_dev_unlink(struct net_device *dev,
				      struct net_device *upper_dev,
				      struct netdev_nested_priv *priv)
7825 {
7826 struct netdev_notifier_changeupper_info changeupper_info = {
7827 .info = {
7828 .dev = dev,
7829 },
7830 .upper_dev = upper_dev,
7831 .linking = false,
7832 };
7833
7834 ASSERT_RTNL();
7835
7836 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7837
7838 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7839 &changeupper_info.info);
7840
7841 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7842
7843 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7844 &changeupper_info.info);
7845
7846 __netdev_update_upper_level(dev, NULL);
7847 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7848
7849 __netdev_update_lower_level(upper_dev, priv);
7850 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7851 priv);
7852 }
7853
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
7864 {
7865 struct netdev_nested_priv priv = {
7866 .flags = NESTED_SYNC_TODO,
7867 .data = NULL,
7868 };
7869
7870 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
7871 }
7872 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7873
static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
7877 {
7878 struct netdev_adjacent *adj;
7879
7880 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7881 if (adj)
7882 adj->ignore = val;
7883
7884 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7885 if (adj)
7886 adj->ignore = val;
7887 }
7888
static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
7891 {
7892 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7893 }
7894
static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
7897 {
7898 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7899 }
7900
int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
7905 {
7906 struct netdev_nested_priv priv = {
7907 .flags = 0,
7908 .data = NULL,
7909 };
7910 int err;
7911
7912 if (!new_dev)
7913 return 0;
7914
7915 if (old_dev && new_dev != old_dev)
7916 netdev_adjacent_dev_disable(dev, old_dev);
7917 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7918 extack);
7919 if (err) {
7920 if (old_dev && new_dev != old_dev)
7921 netdev_adjacent_dev_enable(dev, old_dev);
7922 return err;
7923 }
7924
7925 return 0;
7926 }
7927 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7928
void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
7932 {
7933 struct netdev_nested_priv priv = {
7934 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7935 .data = NULL,
7936 };
7937
7938 if (!new_dev || !old_dev)
7939 return;
7940
7941 if (new_dev == old_dev)
7942 return;
7943
7944 netdev_adjacent_dev_enable(dev, old_dev);
7945 __netdev_upper_dev_unlink(old_dev, dev, &priv);
7946 }
7947 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7948
void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
7952 {
7953 struct netdev_nested_priv priv = {
7954 .flags = 0,
7955 .data = NULL,
7956 };
7957
7958 if (!new_dev)
7959 return;
7960
7961 if (old_dev && new_dev != old_dev)
7962 netdev_adjacent_dev_enable(dev, old_dev);
7963
7964 __netdev_upper_dev_unlink(new_dev, dev, &priv);
7965 }
7966 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7967
7968 /**
7969 * netdev_bonding_info_change - Dispatch event about slave change
7970 * @dev: device
7971 * @bonding_info: info to dispatch
7972 *
7973 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7974 * The caller must hold the RTNL lock.
7975 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
7978 {
7979 struct netdev_notifier_bonding_info info = {
7980 .info.dev = dev,
7981 };
7982
7983 memcpy(&info.bonding_info, bonding_info,
7984 sizeof(struct netdev_bonding_info));
7985 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7986 &info.info);
7987 }
7988 EXPORT_SYMBOL(netdev_bonding_info_change);
7989
static int netdev_offload_xstats_enable_l3(struct net_device *dev,
					   struct netlink_ext_ack *extack)
7992 {
7993 struct netdev_notifier_offload_xstats_info info = {
7994 .info.dev = dev,
7995 .info.extack = extack,
7996 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7997 };
7998 int err;
7999 int rc;
8000
8001 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8002 GFP_KERNEL);
8003 if (!dev->offload_xstats_l3)
8004 return -ENOMEM;
8005
8006 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
8007 NETDEV_OFFLOAD_XSTATS_DISABLE,
8008 &info.info);
8009 err = notifier_to_errno(rc);
8010 if (err)
8011 goto free_stats;
8012
8013 return 0;
8014
8015 free_stats:
8016 kfree(dev->offload_xstats_l3);
8017 dev->offload_xstats_l3 = NULL;
8018 return err;
8019 }
8020
int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack)
8024 {
8025 ASSERT_RTNL();
8026
8027 if (netdev_offload_xstats_enabled(dev, type))
8028 return -EALREADY;
8029
8030 switch (type) {
8031 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8032 return netdev_offload_xstats_enable_l3(dev, extack);
8033 }
8034
8035 WARN_ON(1);
8036 return -EINVAL;
8037 }
8038 EXPORT_SYMBOL(netdev_offload_xstats_enable);
8039
static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8041 {
8042 struct netdev_notifier_offload_xstats_info info = {
8043 .info.dev = dev,
8044 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8045 };
8046
8047 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8048 &info.info);
8049 kfree(dev->offload_xstats_l3);
8050 dev->offload_xstats_l3 = NULL;
8051 }
8052
int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type)
8055 {
8056 ASSERT_RTNL();
8057
8058 if (!netdev_offload_xstats_enabled(dev, type))
8059 return -EALREADY;
8060
8061 switch (type) {
8062 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8063 netdev_offload_xstats_disable_l3(dev);
8064 return 0;
8065 }
8066
8067 WARN_ON(1);
8068 return -EINVAL;
8069 }
8070 EXPORT_SYMBOL(netdev_offload_xstats_disable);
8071
static void netdev_offload_xstats_disable_all(struct net_device *dev)
8073 {
8074 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8075 }
8076
8077 static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
			      enum netdev_offload_xstats_type type)
8080 {
8081 switch (type) {
8082 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8083 return dev->offload_xstats_l3;
8084 }
8085
8086 WARN_ON(1);
8087 return NULL;
8088 }
8089
bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type)
8092 {
8093 ASSERT_RTNL();
8094
8095 return netdev_offload_xstats_get_ptr(dev, type);
8096 }
8097 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8098
8099 struct netdev_notifier_offload_xstats_ru {
8100 bool used;
8101 };
8102
8103 struct netdev_notifier_offload_xstats_rd {
8104 struct rtnl_hw_stats64 stats;
8105 bool used;
8106 };
8107
static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
				  const struct rtnl_hw_stats64 *src)
8110 {
8111 dest->rx_packets += src->rx_packets;
8112 dest->tx_packets += src->tx_packets;
8113 dest->rx_bytes += src->rx_bytes;
8114 dest->tx_bytes += src->tx_bytes;
8115 dest->rx_errors += src->rx_errors;
8116 dest->tx_errors += src->tx_errors;
8117 dest->rx_dropped += src->rx_dropped;
8118 dest->tx_dropped += src->tx_dropped;
8119 dest->multicast += src->multicast;
8120 }
8121
static int netdev_offload_xstats_get_used(struct net_device *dev,
					  enum netdev_offload_xstats_type type,
					  bool *p_used,
					  struct netlink_ext_ack *extack)
8126 {
8127 struct netdev_notifier_offload_xstats_ru report_used = {};
8128 struct netdev_notifier_offload_xstats_info info = {
8129 .info.dev = dev,
8130 .info.extack = extack,
8131 .type = type,
8132 .report_used = &report_used,
8133 };
8134 int rc;
8135
8136 WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8137 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8138 &info.info);
8139 *p_used = report_used.used;
8140 return notifier_to_errno(rc);
8141 }
8142
static int netdev_offload_xstats_get_stats(struct net_device *dev,
					   enum netdev_offload_xstats_type type,
					   struct rtnl_hw_stats64 *p_stats,
					   bool *p_used,
					   struct netlink_ext_ack *extack)
8148 {
8149 struct netdev_notifier_offload_xstats_rd report_delta = {};
8150 struct netdev_notifier_offload_xstats_info info = {
8151 .info.dev = dev,
8152 .info.extack = extack,
8153 .type = type,
8154 .report_delta = &report_delta,
8155 };
8156 struct rtnl_hw_stats64 *stats;
8157 int rc;
8158
8159 stats = netdev_offload_xstats_get_ptr(dev, type);
8160 if (WARN_ON(!stats))
8161 return -EINVAL;
8162
8163 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8164 &info.info);
8165
8166 /* Cache whatever we got, even if there was an error, otherwise the
8167 * successful stats retrievals would get lost.
8168 */
8169 netdev_hw_stats64_add(stats, &report_delta.stats);
8170
8171 if (p_stats)
8172 *p_stats = *stats;
8173 *p_used = report_delta.used;
8174
8175 return notifier_to_errno(rc);
8176 }
8177
int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
			      struct netlink_ext_ack *extack)
8182 {
8183 ASSERT_RTNL();
8184
8185 if (p_stats)
8186 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8187 p_used, extack);
8188 else
8189 return netdev_offload_xstats_get_used(dev, type, p_used,
8190 extack);
8191 }
8192 EXPORT_SYMBOL(netdev_offload_xstats_get);
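/*
 * Example (editor's sketch): enabling L3 hardware stats on a port and
 * reading them back later, both under RTNL. This is roughly what the
 * rtnetlink hardware-stats handling does on behalf of userspace:
 *
 *	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					   extack);
 *	...
 *	struct rtnl_hw_stats64 stats;
 *	bool used;
 *
 *	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					&stats, &used, extack);
 */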
8193
8194 void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
				   const struct rtnl_hw_stats64 *stats)
8197 {
8198 report_delta->used = true;
8199 netdev_hw_stats64_add(&report_delta->stats, stats);
8200 }
8201 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8202
8203 void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8205 {
8206 report_used->used = true;
8207 }
8208 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8209
void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *p_stats)
8213 {
8214 struct rtnl_hw_stats64 *stats;
8215
8216 ASSERT_RTNL();
8217
8218 stats = netdev_offload_xstats_get_ptr(dev, type);
8219 if (WARN_ON(!stats))
8220 return;
8221
8222 netdev_hw_stats64_add(stats, p_stats);
8223 }
8224 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
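/*
 * Example (editor's sketch): the REPORT_USED/REPORT_DELTA events are
 * answered by the offloading driver from its netdevice notifier. A driver
 * might handle the delta request roughly like this, where
 * my_read_hw_counters() is a hypothetical helper that reads and clears the
 * hardware counters for @dev:
 *
 *	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: {
 *		struct netdev_notifier_offload_xstats_info *info = ptr;
 *		struct rtnl_hw_stats64 stats = {};
 *
 *		if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
 *			break;
 *		my_read_hw_counters(dev, &stats);
 *		netdev_offload_xstats_report_delta(info->report_delta, &stats);
 *		break;
 *	}
 */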
8225
8226 /**
8227 * netdev_get_xmit_slave - Get the xmit slave of master device
8228 * @dev: device
8229 * @skb: The packet
8230 * @all_slaves: assume all the slaves are active
8231 *
8232 * The reference counters are not incremented so the caller must be
8233 * careful with locks. The caller must hold RCU lock.
8234 * %NULL is returned if no slave is found.
8235 */
8236
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves)
8240 {
8241 const struct net_device_ops *ops = dev->netdev_ops;
8242
8243 if (!ops->ndo_get_xmit_slave)
8244 return NULL;
8245 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8246 }
8247 EXPORT_SYMBOL(netdev_get_xmit_slave);
8248
static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
						  struct sock *sk)
8251 {
8252 const struct net_device_ops *ops = dev->netdev_ops;
8253
8254 if (!ops->ndo_sk_get_lower_dev)
8255 return NULL;
8256 return ops->ndo_sk_get_lower_dev(dev, sk);
8257 }
8258
8259 /**
8260 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8261 * @dev: device
8262 * @sk: the socket
8263 *
8264 * %NULL is returned if no lower device is found.
8265 */
8266
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk)
8269 {
8270 struct net_device *lower;
8271
8272 lower = netdev_sk_get_lower_dev(dev, sk);
8273 while (lower) {
8274 dev = lower;
8275 lower = netdev_sk_get_lower_dev(dev, sk);
8276 }
8277
8278 return dev;
8279 }
8280 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8281
static void netdev_adjacent_add_links(struct net_device *dev)
8283 {
8284 struct netdev_adjacent *iter;
8285
8286 struct net *net = dev_net(dev);
8287
8288 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8289 if (!net_eq(net, dev_net(iter->dev)))
8290 continue;
8291 netdev_adjacent_sysfs_add(iter->dev, dev,
8292 &iter->dev->adj_list.lower);
8293 netdev_adjacent_sysfs_add(dev, iter->dev,
8294 &dev->adj_list.upper);
8295 }
8296
8297 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8298 if (!net_eq(net, dev_net(iter->dev)))
8299 continue;
8300 netdev_adjacent_sysfs_add(iter->dev, dev,
8301 &iter->dev->adj_list.upper);
8302 netdev_adjacent_sysfs_add(dev, iter->dev,
8303 &dev->adj_list.lower);
8304 }
8305 }
8306
static void netdev_adjacent_del_links(struct net_device *dev)
8308 {
8309 struct netdev_adjacent *iter;
8310
8311 struct net *net = dev_net(dev);
8312
8313 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8314 if (!net_eq(net, dev_net(iter->dev)))
8315 continue;
8316 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8317 &iter->dev->adj_list.lower);
8318 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8319 &dev->adj_list.upper);
8320 }
8321
8322 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8323 if (!net_eq(net, dev_net(iter->dev)))
8324 continue;
8325 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8326 &iter->dev->adj_list.upper);
8327 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8328 &dev->adj_list.lower);
8329 }
8330 }
8331
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8333 {
8334 struct netdev_adjacent *iter;
8335
8336 struct net *net = dev_net(dev);
8337
8338 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8339 if (!net_eq(net, dev_net(iter->dev)))
8340 continue;
8341 netdev_adjacent_sysfs_del(iter->dev, oldname,
8342 &iter->dev->adj_list.lower);
8343 netdev_adjacent_sysfs_add(iter->dev, dev,
8344 &iter->dev->adj_list.lower);
8345 }
8346
8347 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8348 if (!net_eq(net, dev_net(iter->dev)))
8349 continue;
8350 netdev_adjacent_sysfs_del(iter->dev, oldname,
8351 &iter->dev->adj_list.upper);
8352 netdev_adjacent_sysfs_add(iter->dev, dev,
8353 &iter->dev->adj_list.upper);
8354 }
8355 }
8356
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
8359 {
8360 struct netdev_adjacent *lower;
8361
8362 if (!lower_dev)
8363 return NULL;
8364 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8365 if (!lower)
8366 return NULL;
8367
8368 return lower->private;
8369 }
8370 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8371
8372
8373 /**
8374 * netdev_lower_state_changed - Dispatch event about lower device state change
8375 * @lower_dev: device
8376 * @lower_state_info: state to dispatch
8377 *
8378 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8379 * The caller must hold the RTNL lock.
8380 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
8383 {
8384 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8385 .info.dev = lower_dev,
8386 };
8387
8388 ASSERT_RTNL();
8389 changelowerstate_info.lower_state_info = lower_state_info;
8390 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8391 &changelowerstate_info.info);
8392 }
8393 EXPORT_SYMBOL(netdev_lower_state_changed);
8394
static void dev_change_rx_flags(struct net_device *dev, int flags)
8396 {
8397 const struct net_device_ops *ops = dev->netdev_ops;
8398
8399 if (ops->ndo_change_rx_flags)
8400 ops->ndo_change_rx_flags(dev, flags);
8401 }
8402
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8404 {
8405 unsigned int old_flags = dev->flags;
8406 kuid_t uid;
8407 kgid_t gid;
8408
8409 ASSERT_RTNL();
8410
8411 dev->flags |= IFF_PROMISC;
8412 dev->promiscuity += inc;
8413 if (dev->promiscuity == 0) {
8414 /*
8415 * Avoid overflow.
8416 * If inc causes overflow, untouch promisc and return error.
8417 */
8418 if (inc < 0)
8419 dev->flags &= ~IFF_PROMISC;
8420 else {
8421 dev->promiscuity -= inc;
8422 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8423 return -EOVERFLOW;
8424 }
8425 }
8426 if (dev->flags != old_flags) {
8427 netdev_info(dev, "%s promiscuous mode\n",
8428 dev->flags & IFF_PROMISC ? "entered" : "left");
8429 if (audit_enabled) {
8430 current_uid_gid(&uid, &gid);
8431 audit_log(audit_context(), GFP_ATOMIC,
8432 AUDIT_ANOM_PROMISCUOUS,
8433 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8434 dev->name, (dev->flags & IFF_PROMISC),
8435 (old_flags & IFF_PROMISC),
8436 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8437 from_kuid(&init_user_ns, uid),
8438 from_kgid(&init_user_ns, gid),
8439 audit_get_sessionid(current));
8440 }
8441
8442 dev_change_rx_flags(dev, IFF_PROMISC);
8443 }
8444 if (notify)
8445 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8446 return 0;
8447 }
8448
8449 /**
8450 * dev_set_promiscuity - update promiscuity count on a device
8451 * @dev: device
8452 * @inc: modifier
8453 *
8454 * Add or remove promiscuity from a device. While the count in the device
8455 * remains above zero the interface remains promiscuous. Once it hits zero
8456 * the device reverts back to normal filtering operation. A negative inc
8457 * value is used to drop promiscuity on the device.
8458 * Return 0 if successful or a negative errno code on error.
8459 */
int dev_set_promiscuity(struct net_device *dev, int inc)
8461 {
8462 unsigned int old_flags = dev->flags;
8463 int err;
8464
8465 err = __dev_set_promiscuity(dev, inc, true);
8466 if (err < 0)
8467 return err;
8468 if (dev->flags != old_flags)
8469 dev_set_rx_mode(dev);
8470 return err;
8471 }
8472 EXPORT_SYMBOL(dev_set_promiscuity);
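/*
 * Example (editor's sketch): promiscuity is a counter, so every +1 must
 * eventually be balanced by a -1, e.g. around a packet-capture session:
 *
 *	ASSERT_RTNL();
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);
 */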
8473
static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8475 {
8476 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8477
8478 ASSERT_RTNL();
8479
8480 dev->flags |= IFF_ALLMULTI;
8481 dev->allmulti += inc;
8482 if (dev->allmulti == 0) {
8483 /*
8484 * Avoid overflow.
8485 * If inc causes overflow, untouch allmulti and return error.
8486 */
8487 if (inc < 0)
8488 dev->flags &= ~IFF_ALLMULTI;
8489 else {
8490 dev->allmulti -= inc;
8491 netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8492 return -EOVERFLOW;
8493 }
8494 }
8495 if (dev->flags ^ old_flags) {
8496 netdev_info(dev, "%s allmulticast mode\n",
8497 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8498 dev_change_rx_flags(dev, IFF_ALLMULTI);
8499 dev_set_rx_mode(dev);
8500 if (notify)
8501 __dev_notify_flags(dev, old_flags,
8502 dev->gflags ^ old_gflags, 0, NULL);
8503 }
8504 return 0;
8505 }
8506
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
8519
int dev_set_allmulti(struct net_device *dev, int inc)
8521 {
8522 return __dev_set_allmulti(dev, inc, true);
8523 }
8524 EXPORT_SYMBOL(dev_set_allmulti);
8525
8526 /*
8527 * Upload unicast and multicast address lists to device and
8528 * configure RX filtering. When the device doesn't support unicast
8529 * filtering it is put in promiscuous mode while unicast addresses
8530 * are present.
8531 */
void __dev_set_rx_mode(struct net_device *dev)
8533 {
8534 const struct net_device_ops *ops = dev->netdev_ops;
8535
8536 /* dev_open will call this function so the list will stay sane. */
8537 if (!(dev->flags&IFF_UP))
8538 return;
8539
8540 if (!netif_device_present(dev))
8541 return;
8542
8543 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
8547 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8548 __dev_set_promiscuity(dev, 1, false);
8549 dev->uc_promisc = true;
8550 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8551 __dev_set_promiscuity(dev, -1, false);
8552 dev->uc_promisc = false;
8553 }
8554 }
8555
8556 if (ops->ndo_set_rx_mode)
8557 ops->ndo_set_rx_mode(dev);
8558 }
8559
void dev_set_rx_mode(struct net_device *dev)
8561 {
8562 netif_addr_lock_bh(dev);
8563 __dev_set_rx_mode(dev);
8564 netif_addr_unlock_bh(dev);
8565 }
8566
8567 /**
8568 * dev_get_flags - get flags reported to userspace
8569 * @dev: device
8570 *
8571 * Get the combination of flag bits exported through APIs to userspace.
8572 */
unsigned int dev_get_flags(const struct net_device *dev)
8574 {
8575 unsigned int flags;
8576
8577 flags = (dev->flags & ~(IFF_PROMISC |
8578 IFF_ALLMULTI |
8579 IFF_RUNNING |
8580 IFF_LOWER_UP |
8581 IFF_DORMANT)) |
8582 (dev->gflags & (IFF_PROMISC |
8583 IFF_ALLMULTI));
8584
8585 if (netif_running(dev)) {
8586 if (netif_oper_up(dev))
8587 flags |= IFF_RUNNING;
8588 if (netif_carrier_ok(dev))
8589 flags |= IFF_LOWER_UP;
8590 if (netif_dormant(dev))
8591 flags |= IFF_DORMANT;
8592 }
8593
8594 return flags;
8595 }
8596 EXPORT_SYMBOL(dev_get_flags);
8597
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
8600 {
8601 unsigned int old_flags = dev->flags;
8602 int ret;
8603
8604 ASSERT_RTNL();
8605
8606 /*
8607 * Set the flags on our device.
8608 */
8609
8610 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8611 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8612 IFF_AUTOMEDIA)) |
8613 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8614 IFF_ALLMULTI));
8615
8616 /*
8617 * Load in the correct multicast list now the flags have changed.
8618 */
8619
8620 if ((old_flags ^ flags) & IFF_MULTICAST)
8621 dev_change_rx_flags(dev, IFF_MULTICAST);
8622
8623 dev_set_rx_mode(dev);
8624
	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */
8630
8631 ret = 0;
8632 if ((old_flags ^ flags) & IFF_UP) {
8633 if (old_flags & IFF_UP)
8634 __dev_close(dev);
8635 else
8636 ret = __dev_open(dev, extack);
8637 }
8638
8639 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8640 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8641 unsigned int old_flags = dev->flags;
8642
8643 dev->gflags ^= IFF_PROMISC;
8644
8645 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8646 if (dev->flags != old_flags)
8647 dev_set_rx_mode(dev);
8648 }
8649
	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without
	 * reporting.
	 */
8654 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8655 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8656
8657 dev->gflags ^= IFF_ALLMULTI;
8658 __dev_set_allmulti(dev, inc, false);
8659 }
8660
8661 return ret;
8662 }
8663
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh)
8667 {
8668 unsigned int changes = dev->flags ^ old_flags;
8669
8670 if (gchanges)
8671 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8672
8673 if (changes & IFF_UP) {
8674 if (dev->flags & IFF_UP)
8675 call_netdevice_notifiers(NETDEV_UP, dev);
8676 else
8677 call_netdevice_notifiers(NETDEV_DOWN, dev);
8678 }
8679
8680 if (dev->flags & IFF_UP &&
8681 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8682 struct netdev_notifier_change_info change_info = {
8683 .info = {
8684 .dev = dev,
8685 },
8686 .flags_changed = changes,
8687 };
8688
8689 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8690 }
8691 }
8692
8693 /**
8694 * dev_change_flags - change device settings
8695 * @dev: device
8696 * @flags: device state flags
8697 * @extack: netlink extended ack
8698 *
8699 * Change settings on device based state flags. The flags are
8700 * in the userspace exported format.
8701 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
8704 {
8705 int ret;
8706 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8707
8708 ret = __dev_change_flags(dev, flags, extack);
8709 if (ret < 0)
8710 return ret;
8711
8712 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8713 __dev_notify_flags(dev, old_flags, changes, 0, NULL);
8714 return ret;
8715 }
8716 EXPORT_SYMBOL(dev_change_flags);
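/*
 * Example (editor's sketch): bringing an interface administratively up
 * through the flags interface, much like an SIOCSIFFLAGS ioctl would:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	ASSERT_RTNL();
 *	err = dev_change_flags(dev, flags | IFF_UP, NULL);
 */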
8717
int __dev_set_mtu(struct net_device *dev, int new_mtu)
8719 {
8720 const struct net_device_ops *ops = dev->netdev_ops;
8721
8722 if (ops->ndo_change_mtu)
8723 return ops->ndo_change_mtu(dev, new_mtu);
8724
8725 /* Pairs with all the lockless reads of dev->mtu in the stack */
8726 WRITE_ONCE(dev->mtu, new_mtu);
8727 return 0;
8728 }
8729 EXPORT_SYMBOL(__dev_set_mtu);
8730
int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
8733 {
8734 /* MTU must be positive, and in range */
8735 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8736 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8737 return -EINVAL;
8738 }
8739
8740 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8741 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8742 return -EINVAL;
8743 }
8744 return 0;
8745 }
8746
8747 /**
8748 * dev_set_mtu_ext - Change maximum transfer unit
8749 * @dev: device
8750 * @new_mtu: new transfer unit
8751 * @extack: netlink extended ack
8752 *
8753 * Change the maximum transfer size of the network device.
8754 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
8757 {
8758 int err, orig_mtu;
8759
8760 if (new_mtu == dev->mtu)
8761 return 0;
8762
8763 err = dev_validate_mtu(dev, new_mtu, extack);
8764 if (err)
8765 return err;
8766
8767 if (!netif_device_present(dev))
8768 return -ENODEV;
8769
8770 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8771 err = notifier_to_errno(err);
8772 if (err)
8773 return err;
8774
8775 orig_mtu = dev->mtu;
8776 err = __dev_set_mtu(dev, new_mtu);
8777
8778 if (!err) {
8779 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8780 orig_mtu);
8781 err = notifier_to_errno(err);
8782 if (err) {
8783 /* setting mtu back and notifying everyone again,
8784 * so that they have a chance to revert changes.
8785 */
8786 __dev_set_mtu(dev, orig_mtu);
8787 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8788 new_mtu);
8789 }
8790 }
8791 return err;
8792 }
8793
int dev_set_mtu(struct net_device *dev, int new_mtu)
8795 {
8796 struct netlink_ext_ack extack;
8797 int err;
8798
8799 memset(&extack, 0, sizeof(extack));
8800 err = dev_set_mtu_ext(dev, new_mtu, &extack);
8801 if (err && extack._msg)
8802 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8803 return err;
8804 }
8805 EXPORT_SYMBOL(dev_set_mtu);
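/*
 * Example (editor's sketch): switching a device to jumbo frames. The call
 * must be made under RTNL; range checking against dev->min_mtu/max_mtu and
 * the notifier round-trip happen inside dev_set_mtu_ext():
 *
 *	ASSERT_RTNL();
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		netdev_err(dev, "failed to set MTU: %d\n", err);
 */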
8806
8807 /**
8808 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8809 * @dev: device
8810 * @new_len: new tx queue length
8811 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8813 {
8814 unsigned int orig_len = dev->tx_queue_len;
8815 int res;
8816
8817 if (new_len != (unsigned int)new_len)
8818 return -ERANGE;
8819
8820 if (new_len != orig_len) {
8821 dev->tx_queue_len = new_len;
8822 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8823 res = notifier_to_errno(res);
8824 if (res)
8825 goto err_rollback;
8826 res = dev_qdisc_change_tx_queue_len(dev);
8827 if (res)
8828 goto err_rollback;
8829 }
8830
8831 return 0;
8832
8833 err_rollback:
8834 netdev_err(dev, "refused to change device tx_queue_len\n");
8835 dev->tx_queue_len = orig_len;
8836 return res;
8837 }
8838
8839 /**
8840 * dev_set_group - Change group this device belongs to
8841 * @dev: device
8842 * @new_group: group this device should belong to
8843 */
void dev_set_group(struct net_device *dev, int new_group)
8845 {
8846 dev->group = new_group;
8847 }
8848
8849 /**
8850 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8851 * @dev: device
8852 * @addr: new address
8853 * @extack: netlink extended ack
8854 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
8857 {
8858 struct netdev_notifier_pre_changeaddr_info info = {
8859 .info.dev = dev,
8860 .info.extack = extack,
8861 .dev_addr = addr,
8862 };
8863 int rc;
8864
8865 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8866 return notifier_to_errno(rc);
8867 }
8868 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8869
8870 /**
8871 * dev_set_mac_address - Change Media Access Control Address
8872 * @dev: device
8873 * @sa: new address
8874 * @extack: netlink extended ack
8875 *
8876 * Change the hardware (MAC) address of the device
8877 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
8880 {
8881 const struct net_device_ops *ops = dev->netdev_ops;
8882 int err;
8883
8884 if (!ops->ndo_set_mac_address)
8885 return -EOPNOTSUPP;
8886 if (sa->sa_family != dev->type)
8887 return -EINVAL;
8888 if (!netif_device_present(dev))
8889 return -ENODEV;
8890 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8891 if (err)
8892 return err;
8893 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8894 err = ops->ndo_set_mac_address(dev, sa);
8895 if (err)
8896 return err;
8897 }
8898 dev->addr_assign_type = NET_ADDR_SET;
8899 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8900 add_device_randomness(dev->dev_addr, dev->addr_len);
8901 return 0;
8902 }
8903 EXPORT_SYMBOL(dev_set_mac_address);
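/*
 * Example (editor's sketch): programming a new hardware address under RTNL.
 * new_mac is a hypothetical caller-supplied buffer of dev->addr_len bytes;
 * sa_family must match dev->type or -EINVAL is returned:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa, NULL);
 */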
8904
8905 static DECLARE_RWSEM(dev_addr_sem);
8906
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
8909 {
8910 int ret;
8911
8912 down_write(&dev_addr_sem);
8913 ret = dev_set_mac_address(dev, sa, extack);
8914 up_write(&dev_addr_sem);
8915 return ret;
8916 }
8917 EXPORT_SYMBOL(dev_set_mac_address_user);
8918
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8920 {
8921 size_t size = sizeof(sa->sa_data_min);
8922 struct net_device *dev;
8923 int ret = 0;
8924
8925 down_read(&dev_addr_sem);
8926 rcu_read_lock();
8927
8928 dev = dev_get_by_name_rcu(net, dev_name);
8929 if (!dev) {
8930 ret = -ENODEV;
8931 goto unlock;
8932 }
8933 if (!dev->addr_len)
8934 memset(sa->sa_data, 0, size);
8935 else
8936 memcpy(sa->sa_data, dev->dev_addr,
8937 min_t(size_t, size, dev->addr_len));
8938 sa->sa_family = dev->type;
8939
8940 unlock:
8941 rcu_read_unlock();
8942 up_read(&dev_addr_sem);
8943 return ret;
8944 }
8945 EXPORT_SYMBOL(dev_get_mac_address);
8946
8947 /**
8948 * dev_change_carrier - Change device carrier
8949 * @dev: device
8950 * @new_carrier: new value
8951 *
8952 * Change device carrier
8953 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
8955 {
8956 const struct net_device_ops *ops = dev->netdev_ops;
8957
8958 if (!ops->ndo_change_carrier)
8959 return -EOPNOTSUPP;
8960 if (!netif_device_present(dev))
8961 return -ENODEV;
8962 return ops->ndo_change_carrier(dev, new_carrier);
8963 }
8964
8965 /**
8966 * dev_get_phys_port_id - Get device physical port ID
8967 * @dev: device
8968 * @ppid: port ID
8969 *
8970 * Get device physical port ID
8971 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
8974 {
8975 const struct net_device_ops *ops = dev->netdev_ops;
8976
8977 if (!ops->ndo_get_phys_port_id)
8978 return -EOPNOTSUPP;
8979 return ops->ndo_get_phys_port_id(dev, ppid);
8980 }
8981
8982 /**
8983 * dev_get_phys_port_name - Get device physical port name
8984 * @dev: device
8985 * @name: port name
8986 * @len: limit of bytes to copy to name
8987 *
8988 * Get device physical port name
8989 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
8992 {
8993 const struct net_device_ops *ops = dev->netdev_ops;
8994 int err;
8995
8996 if (ops->ndo_get_phys_port_name) {
8997 err = ops->ndo_get_phys_port_name(dev, name, len);
8998 if (err != -EOPNOTSUPP)
8999 return err;
9000 }
9001 return devlink_compat_phys_port_name_get(dev, name, len);
9002 }
9003
9004 /**
9005 * dev_get_port_parent_id - Get the device's port parent identifier
9006 * @dev: network device
9007 * @ppid: pointer to a storage for the port's parent identifier
9008 * @recurse: allow/disallow recursion to lower devices
9009 *
9010 * Get the device's port parent identifier
9011 */
9012 int dev_get_port_parent_id(struct net_device *dev,
9013 struct netdev_phys_item_id *ppid,
9014 bool recurse)
9015 {
9016 const struct net_device_ops *ops = dev->netdev_ops;
9017 struct netdev_phys_item_id first = { };
9018 struct net_device *lower_dev;
9019 struct list_head *iter;
9020 int err;
9021
9022 if (ops->ndo_get_port_parent_id) {
9023 err = ops->ndo_get_port_parent_id(dev, ppid);
9024 if (err != -EOPNOTSUPP)
9025 return err;
9026 }
9027
9028 err = devlink_compat_switch_id_get(dev, ppid);
9029 if (!recurse || err != -EOPNOTSUPP)
9030 return err;
9031
9032 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9033 err = dev_get_port_parent_id(lower_dev, ppid, true);
9034 if (err)
9035 break;
9036 if (!first.id_len)
9037 first = *ppid;
9038 else if (memcmp(&first, ppid, sizeof(*ppid)))
9039 return -EOPNOTSUPP;
9040 }
9041
9042 return err;
9043 }
9044 EXPORT_SYMBOL(dev_get_port_parent_id);
9045
9046 /**
9047 * netdev_port_same_parent_id - Indicate if two network devices have
9048 * the same port parent identifier
9049 * @a: first network device
9050 * @b: second network device
9051 */
9052 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9053 {
9054 struct netdev_phys_item_id a_id = { };
9055 struct netdev_phys_item_id b_id = { };
9056
9057 if (dev_get_port_parent_id(a, &a_id, true) ||
9058 dev_get_port_parent_id(b, &b_id, true))
9059 return false;
9060
9061 return netdev_phys_item_id_same(&a_id, &b_id);
9062 }
9063 EXPORT_SYMBOL(netdev_port_same_parent_id);
9064
9065 /**
9066 * dev_change_proto_down - set carrier according to proto_down.
9067 *
9068 * @dev: device
9069 * @proto_down: new value
9070 */
9071 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9072 {
9073 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9074 return -EOPNOTSUPP;
9075 if (!netif_device_present(dev))
9076 return -ENODEV;
9077 if (proto_down)
9078 netif_carrier_off(dev);
9079 else
9080 netif_carrier_on(dev);
9081 dev->proto_down = proto_down;
9082 return 0;
9083 }
9084
9085 /**
9086 * dev_change_proto_down_reason - proto down reason
9087 *
9088 * @dev: device
9089 * @mask: proto down mask
9090 * @value: proto down value
9091 */
9092 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9093 u32 value)
9094 {
9095 int b;
9096
9097 if (!mask) {
9098 dev->proto_down_reason = value;
9099 } else {
9100 for_each_set_bit(b, &mask, 32) {
9101 if (value & (1 << b))
9102 dev->proto_down_reason |= BIT(b);
9103 else
9104 dev->proto_down_reason &= ~BIT(b);
9105 }
9106 }
9107 }
9108
9109 struct bpf_xdp_link {
9110 struct bpf_link link;
9111 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9112 int flags;
9113 };
9114
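/* Map the XDP_FLAGS_* attach flags to an installation mode. Explicit
 * HW/DRV/SKB flags win; with no mode flag set, prefer native (driver)
 * mode when the driver implements ndo_bpf and fall back to generic
 * (skb) mode otherwise.
 */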
9115 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9116 {
9117 if (flags & XDP_FLAGS_HW_MODE)
9118 return XDP_MODE_HW;
9119 if (flags & XDP_FLAGS_DRV_MODE)
9120 return XDP_MODE_DRV;
9121 if (flags & XDP_FLAGS_SKB_MODE)
9122 return XDP_MODE_SKB;
9123 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9124 }
9125
9126 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9127 {
9128 switch (mode) {
9129 case XDP_MODE_SKB:
9130 return generic_xdp_install;
9131 case XDP_MODE_DRV:
9132 case XDP_MODE_HW:
9133 return dev->netdev_ops->ndo_bpf;
9134 default:
9135 return NULL;
9136 }
9137 }
9138
9139 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9140 enum bpf_xdp_mode mode)
9141 {
9142 return dev->xdp_state[mode].link;
9143 }
9144
9145 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9146 enum bpf_xdp_mode mode)
9147 {
9148 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9149
9150 if (link)
9151 return link->link.prog;
9152 return dev->xdp_state[mode].prog;
9153 }
9154
9155 u8 dev_xdp_prog_count(struct net_device *dev)
9156 {
9157 u8 count = 0;
9158 int i;
9159
9160 for (i = 0; i < __MAX_XDP_MODE; i++)
9161 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9162 count++;
9163 return count;
9164 }
9165 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9166
9167 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9168 {
9169 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9170
9171 return prog ? prog->aux->id : 0;
9172 }
9173
9174 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9175 struct bpf_xdp_link *link)
9176 {
9177 dev->xdp_state[mode].link = link;
9178 dev->xdp_state[mode].prog = NULL;
9179 }
9180
9181 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9182 struct bpf_prog *prog)
9183 {
9184 dev->xdp_state[mode].link = NULL;
9185 dev->xdp_state[mode].prog = prog;
9186 }
9187
9188 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9189 bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9190 u32 flags, struct bpf_prog *prog)
9191 {
9192 struct netdev_bpf xdp;
9193 int err;
9194
9195 memset(&xdp, 0, sizeof(xdp));
9196 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9197 xdp.extack = extack;
9198 xdp.flags = flags;
9199 xdp.prog = prog;
9200
9201 /* Drivers assume refcnt is already incremented (i.e., prog pointer is
9202 * "moved" into driver), so they don't increment it on their own, but
9203 * they do decrement refcnt when program is detached or replaced.
9204 * Given net_device also owns link/prog, we need to bump refcnt here
9205 * to prevent drivers from underflowing it.
9206 */
9207 if (prog)
9208 bpf_prog_inc(prog);
9209 err = bpf_op(dev, &xdp);
9210 if (err) {
9211 if (prog)
9212 bpf_prog_put(prog);
9213 return err;
9214 }
9215
9216 if (mode != XDP_MODE_HW)
9217 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9218
9219 return 0;
9220 }
9221
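/* Detach every installed XDP program on device teardown. Links are only
 * auto-detached (their dev pointer is cleared); standalone programs drop
 * the reference taken when they were attached. Must run under RTNL.
 */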
9222 static void dev_xdp_uninstall(struct net_device *dev)
9223 {
9224 struct bpf_xdp_link *link;
9225 struct bpf_prog *prog;
9226 enum bpf_xdp_mode mode;
9227 bpf_op_t bpf_op;
9228
9229 ASSERT_RTNL();
9230
9231 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9232 prog = dev_xdp_prog(dev, mode);
9233 if (!prog)
9234 continue;
9235
9236 bpf_op = dev_xdp_bpf_op(dev, mode);
9237 if (!bpf_op)
9238 continue;
9239
9240 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9241
9242 /* auto-detach link from net device */
9243 link = dev_xdp_link(dev, mode);
9244 if (link)
9245 link->dev = NULL;
9246 else
9247 bpf_prog_put(prog);
9248
9249 dev_xdp_set_link(dev, mode, NULL);
9250 }
9251 }
9252
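/* Common attach path for both netlink (prog fd) and BPF link attachments.
 * Validates the flag combination, refuses to mix link- and prog-based
 * attachment for the same mode, enforces the XDP_FLAGS_REPLACE /
 * XDP_FLAGS_UPDATE_IF_NOEXIST expectations, and only calls into the driver
 * when the effective program actually changes.
 */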
9253 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9254 struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9255 struct bpf_prog *old_prog, u32 flags)
9256 {
9257 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9258 struct bpf_prog *cur_prog;
9259 struct net_device *upper;
9260 struct list_head *iter;
9261 enum bpf_xdp_mode mode;
9262 bpf_op_t bpf_op;
9263 int err;
9264
9265 ASSERT_RTNL();
9266
9267 /* either link or prog attachment, never both */
9268 if (link && (new_prog || old_prog))
9269 return -EINVAL;
9270 /* link supports only XDP mode flags */
9271 if (link && (flags & ~XDP_FLAGS_MODES)) {
9272 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9273 return -EINVAL;
9274 }
9275 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9276 if (num_modes > 1) {
9277 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9278 return -EINVAL;
9279 }
9280 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9281 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9282 NL_SET_ERR_MSG(extack,
9283 "More than one program loaded, unset mode is ambiguous");
9284 return -EINVAL;
9285 }
9286 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9287 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9288 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9289 return -EINVAL;
9290 }
9291
9292 mode = dev_xdp_mode(dev, flags);
9293 /* can't replace attached link */
9294 if (dev_xdp_link(dev, mode)) {
9295 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9296 return -EBUSY;
9297 }
9298
9299 /* don't allow if an upper device already has a program */
9300 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9301 if (dev_xdp_prog_count(upper) > 0) {
9302 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9303 return -EEXIST;
9304 }
9305 }
9306
9307 cur_prog = dev_xdp_prog(dev, mode);
9308 /* can't replace attached prog with link */
9309 if (link && cur_prog) {
9310 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9311 return -EBUSY;
9312 }
9313 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9314 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9315 return -EEXIST;
9316 }
9317
9318 /* put effective new program into new_prog */
9319 if (link)
9320 new_prog = link->link.prog;
9321
9322 if (new_prog) {
9323 bool offload = mode == XDP_MODE_HW;
9324 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9325 ? XDP_MODE_DRV : XDP_MODE_SKB;
9326
9327 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9328 NL_SET_ERR_MSG(extack, "XDP program already attached");
9329 return -EBUSY;
9330 }
9331 if (!offload && dev_xdp_prog(dev, other_mode)) {
9332 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9333 return -EEXIST;
9334 }
9335 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9336 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9337 return -EINVAL;
9338 }
9339 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9340 NL_SET_ERR_MSG(extack, "Program bound to different device");
9341 return -EINVAL;
9342 }
9343 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9344 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9345 return -EINVAL;
9346 }
9347 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9348 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9349 return -EINVAL;
9350 }
9351 }
9352
9353 /* don't call drivers if the effective program didn't change */
9354 if (new_prog != cur_prog) {
9355 bpf_op = dev_xdp_bpf_op(dev, mode);
9356 if (!bpf_op) {
9357 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9358 return -EOPNOTSUPP;
9359 }
9360
9361 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9362 if (err)
9363 return err;
9364 }
9365
9366 if (link)
9367 dev_xdp_set_link(dev, mode, link);
9368 else
9369 dev_xdp_set_prog(dev, mode, new_prog);
9370 if (cur_prog)
9371 bpf_prog_put(cur_prog);
9372
9373 return 0;
9374 }
9375
9376 static int dev_xdp_attach_link(struct net_device *dev,
9377 struct netlink_ext_ack *extack,
9378 struct bpf_xdp_link *link)
9379 {
9380 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9381 }
9382
9383 static int dev_xdp_detach_link(struct net_device *dev,
9384 struct netlink_ext_ack *extack,
9385 struct bpf_xdp_link *link)
9386 {
9387 enum bpf_xdp_mode mode;
9388 bpf_op_t bpf_op;
9389
9390 ASSERT_RTNL();
9391
9392 mode = dev_xdp_mode(dev, link->flags);
9393 if (dev_xdp_link(dev, mode) != link)
9394 return -EINVAL;
9395
9396 bpf_op = dev_xdp_bpf_op(dev, mode);
9397 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9398 dev_xdp_set_link(dev, mode, NULL);
9399 return 0;
9400 }
9401
9402 static void bpf_xdp_link_release(struct bpf_link *link)
9403 {
9404 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9405
9406 rtnl_lock();
9407
9408 /* if racing with net_device's tear down, xdp_link->dev might be
9409 * already NULL, in which case link was already auto-detached
9410 */
9411 if (xdp_link->dev) {
9412 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9413 xdp_link->dev = NULL;
9414 }
9415
9416 rtnl_unlock();
9417 }
9418
9419 static int bpf_xdp_link_detach(struct bpf_link *link)
9420 {
9421 bpf_xdp_link_release(link);
9422 return 0;
9423 }
9424
9425 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9426 {
9427 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9428
9429 kfree(xdp_link);
9430 }
9431
9432 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9433 struct seq_file *seq)
9434 {
9435 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9436 u32 ifindex = 0;
9437
9438 rtnl_lock();
9439 if (xdp_link->dev)
9440 ifindex = xdp_link->dev->ifindex;
9441 rtnl_unlock();
9442
9443 seq_printf(seq, "ifindex:\t%u\n", ifindex);
9444 }
9445
9446 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9447 struct bpf_link_info *info)
9448 {
9449 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9450 u32 ifindex = 0;
9451
9452 rtnl_lock();
9453 if (xdp_link->dev)
9454 ifindex = xdp_link->dev->ifindex;
9455 rtnl_unlock();
9456
9457 info->xdp.ifindex = ifindex;
9458 return 0;
9459 }
9460
9461 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9462 struct bpf_prog *old_prog)
9463 {
9464 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9465 enum bpf_xdp_mode mode;
9466 bpf_op_t bpf_op;
9467 int err = 0;
9468
9469 rtnl_lock();
9470
9471 /* link might have been auto-released already, so fail */
9472 if (!xdp_link->dev) {
9473 err = -ENOLINK;
9474 goto out_unlock;
9475 }
9476
9477 if (old_prog && link->prog != old_prog) {
9478 err = -EPERM;
9479 goto out_unlock;
9480 }
9481 old_prog = link->prog;
9482 if (old_prog->type != new_prog->type ||
9483 old_prog->expected_attach_type != new_prog->expected_attach_type) {
9484 err = -EINVAL;
9485 goto out_unlock;
9486 }
9487
9488 if (old_prog == new_prog) {
9489 /* no-op, don't disturb drivers */
9490 bpf_prog_put(new_prog);
9491 goto out_unlock;
9492 }
9493
9494 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9495 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9496 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9497 xdp_link->flags, new_prog);
9498 if (err)
9499 goto out_unlock;
9500
9501 old_prog = xchg(&link->prog, new_prog);
9502 bpf_prog_put(old_prog);
9503
9504 out_unlock:
9505 rtnl_unlock();
9506 return err;
9507 }
9508
9509 static const struct bpf_link_ops bpf_xdp_link_lops = {
9510 .release = bpf_xdp_link_release,
9511 .dealloc = bpf_xdp_link_dealloc,
9512 .detach = bpf_xdp_link_detach,
9513 .show_fdinfo = bpf_xdp_link_show_fdinfo,
9514 .fill_link_info = bpf_xdp_link_fill_link_info,
9515 .update_prog = bpf_xdp_link_update,
9516 };
9517
9518 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9519 {
9520 struct net *net = current->nsproxy->net_ns;
9521 struct bpf_link_primer link_primer;
9522 struct netlink_ext_ack extack = {};
9523 struct bpf_xdp_link *link;
9524 struct net_device *dev;
9525 int err, fd;
9526
9527 rtnl_lock();
9528 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9529 if (!dev) {
9530 rtnl_unlock();
9531 return -EINVAL;
9532 }
9533
9534 link = kzalloc(sizeof(*link), GFP_USER);
9535 if (!link) {
9536 err = -ENOMEM;
9537 goto unlock;
9538 }
9539
9540 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9541 link->dev = dev;
9542 link->flags = attr->link_create.flags;
9543
9544 err = bpf_link_prime(&link->link, &link_primer);
9545 if (err) {
9546 kfree(link);
9547 goto unlock;
9548 }
9549
9550 err = dev_xdp_attach_link(dev, &extack, link);
9551 rtnl_unlock();
9552
9553 if (err) {
9554 link->dev = NULL;
9555 bpf_link_cleanup(&link_primer);
9556 trace_bpf_xdp_link_attach_failed(extack._msg);
9557 goto out_put_dev;
9558 }
9559
9560 fd = bpf_link_settle(&link_primer);
9561 /* link itself doesn't hold dev's refcnt to not complicate shutdown */
9562 dev_put(dev);
9563 return fd;
9564
9565 unlock:
9566 rtnl_unlock();
9567
9568 out_put_dev:
9569 dev_put(dev);
9570 return err;
9571 }
9572
9573 /**
9574 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
9575 * @dev: device
9576 * @extack: netlink extended ack
9577 * @fd: new program fd or negative value to clear
9578 * @expected_fd: old program fd that userspace expects to replace or clear
9579 * @flags: xdp-related flags
9580 *
9581 * Set or clear a bpf program for a device
9582 */
9583 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9584 int fd, int expected_fd, u32 flags)
9585 {
9586 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9587 struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9588 int err;
9589
9590 ASSERT_RTNL();
9591
9592 if (fd >= 0) {
9593 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9594 mode != XDP_MODE_SKB);
9595 if (IS_ERR(new_prog))
9596 return PTR_ERR(new_prog);
9597 }
9598
9599 if (expected_fd >= 0) {
9600 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9601 mode != XDP_MODE_SKB);
9602 if (IS_ERR(old_prog)) {
9603 err = PTR_ERR(old_prog);
9604 old_prog = NULL;
9605 goto err_out;
9606 }
9607 }
9608
9609 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9610
9611 err_out:
9612 if (err && new_prog)
9613 bpf_prog_put(new_prog);
9614 if (old_prog)
9615 bpf_prog_put(old_prog);
9616 return err;
9617 }
9618
9619 /**
9620 * dev_index_reserve() - allocate an ifindex in a namespace
9621 * @net: the applicable net namespace
9622 * @ifindex: requested ifindex, pass %0 to get one allocated
9623 *
9624 * Allocate an ifindex for a new device. Caller must either use the ifindex
9625 * to store the device (via list_netdevice()) or call dev_index_release()
9626 * to give the index up.
9627 *
9628 * Return: a suitable unique value for a new device interface number or -errno.
9629 */
9630 static int dev_index_reserve(struct net *net, u32 ifindex)
9631 {
9632 int err;
9633
9634 if (ifindex > INT_MAX) {
9635 DEBUG_NET_WARN_ON_ONCE(1);
9636 return -EINVAL;
9637 }
9638
9639 if (!ifindex)
9640 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9641 xa_limit_31b, &net->ifindex, GFP_KERNEL);
9642 else
9643 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9644 if (err < 0)
9645 return err;
9646
9647 return ifindex;
9648 }
9649
9650 static void dev_index_release(struct net *net, int ifindex)
9651 {
9652 /* Expect only unused indexes, unlist_netdevice() removes the used */
9653 WARN_ON(xa_erase(&net->dev_by_index, ifindex));
9654 }
9655
9656 /* Delayed registration/unregistration */
9657 LIST_HEAD(net_todo_list);
9658 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9659
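/* Queue a device for the deferred unregister work processed by
 * netdev_run_todo() once RTNL is released, and account it in the per-netns
 * dev_unreg_count that netdev_unregistering_wq waiters check.
 */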
9660 static void net_set_todo(struct net_device *dev)
9661 {
9662 list_add_tail(&dev->todo_list, &net_todo_list);
9663 atomic_inc(&dev_net(dev)->dev_unreg_count);
9664 }
9665
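/* Features in NETIF_F_UPPER_DISABLES must not stay enabled on a lower
 * device while an upper device (e.g. a bonding master or bridge) has them
 * turned off; drop any such feature the upper does not want.
 */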
9666 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9667 struct net_device *upper, netdev_features_t features)
9668 {
9669 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9670 netdev_features_t feature;
9671 int feature_bit;
9672
9673 for_each_netdev_feature(upper_disables, feature_bit) {
9674 feature = __NETIF_F_BIT(feature_bit);
9675 if (!(upper->wanted_features & feature)
9676 && (features & feature)) {
9677 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9678 &feature, upper->name);
9679 features &= ~feature;
9680 }
9681 }
9682
9683 return features;
9684 }
9685
9686 static void netdev_sync_lower_features(struct net_device *upper,
9687 struct net_device *lower, netdev_features_t features)
9688 {
9689 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9690 netdev_features_t feature;
9691 int feature_bit;
9692
9693 for_each_netdev_feature(upper_disables, feature_bit) {
9694 feature = __NETIF_F_BIT(feature_bit);
9695 if (!(features & feature) && (lower->features & feature)) {
9696 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9697 &feature, lower->name);
9698 lower->wanted_features &= ~feature;
9699 __netdev_update_features(lower);
9700
9701 if (unlikely(lower->features & feature))
9702 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9703 &feature, lower->name);
9704 else
9705 netdev_features_change(lower);
9706 }
9707 }
9708 }
9709
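/* Resolve feature dependencies and illegal combinations: mixed checksum
 * offloads, TSO/GSO requiring SG, GRO_HW requiring RXCSUM, RX-FCS vs
 * LRO/HW-GRO, and TLS offloads requiring the matching checksum features.
 * The returned mask is what may actually be enabled.
 */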
9710 static netdev_features_t netdev_fix_features(struct net_device *dev,
9711 netdev_features_t features)
9712 {
9713 /* Fix illegal checksum combinations */
9714 if ((features & NETIF_F_HW_CSUM) &&
9715 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9716 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9717 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9718 }
9719
9720 /* TSO requires that SG is present as well. */
9721 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9722 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9723 features &= ~NETIF_F_ALL_TSO;
9724 }
9725
9726 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9727 !(features & NETIF_F_IP_CSUM)) {
9728 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9729 features &= ~NETIF_F_TSO;
9730 features &= ~NETIF_F_TSO_ECN;
9731 }
9732
9733 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9734 !(features & NETIF_F_IPV6_CSUM)) {
9735 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9736 features &= ~NETIF_F_TSO6;
9737 }
9738
9739 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9740 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9741 features &= ~NETIF_F_TSO_MANGLEID;
9742
9743 /* TSO ECN requires that TSO is present as well. */
9744 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9745 features &= ~NETIF_F_TSO_ECN;
9746
9747 /* Software GSO depends on SG. */
9748 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9749 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9750 features &= ~NETIF_F_GSO;
9751 }
9752
9753 /* GSO partial features require GSO partial be set */
9754 if ((features & dev->gso_partial_features) &&
9755 !(features & NETIF_F_GSO_PARTIAL)) {
9756 netdev_dbg(dev,
9757 "Dropping partially supported GSO features since no GSO partial.\n");
9758 features &= ~dev->gso_partial_features;
9759 }
9760
9761 if (!(features & NETIF_F_RXCSUM)) {
9762 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9763 * successfully merged by hardware must also have the
9764 * checksum verified by hardware. If the user does not
9765 * want to enable RXCSUM, logically, we should disable GRO_HW.
9766 */
9767 if (features & NETIF_F_GRO_HW) {
9768 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9769 features &= ~NETIF_F_GRO_HW;
9770 }
9771 }
9772
9773 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9774 if (features & NETIF_F_RXFCS) {
9775 if (features & NETIF_F_LRO) {
9776 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9777 features &= ~NETIF_F_LRO;
9778 }
9779
9780 if (features & NETIF_F_GRO_HW) {
9781 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9782 features &= ~NETIF_F_GRO_HW;
9783 }
9784 }
9785
9786 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9787 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9788 features &= ~NETIF_F_LRO;
9789 }
9790
9791 if (features & NETIF_F_HW_TLS_TX) {
9792 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9793 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9794 bool hw_csum = features & NETIF_F_HW_CSUM;
9795
9796 if (!ip_csum && !hw_csum) {
9797 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9798 features &= ~NETIF_F_HW_TLS_TX;
9799 }
9800 }
9801
9802 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9803 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9804 features &= ~NETIF_F_HW_TLS_RX;
9805 }
9806
9807 return features;
9808 }
9809
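/* Recompute and apply dev->features under RTNL. Returns nonzero when the
 * feature set changed (or may have changed after a driver error), so the
 * caller knows a feature-change notification is due; returns 0 when
 * nothing changed.
 */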
9810 int __netdev_update_features(struct net_device *dev)
9811 {
9812 struct net_device *upper, *lower;
9813 netdev_features_t features;
9814 struct list_head *iter;
9815 int err = -1;
9816
9817 ASSERT_RTNL();
9818
9819 features = netdev_get_wanted_features(dev);
9820
9821 if (dev->netdev_ops->ndo_fix_features)
9822 features = dev->netdev_ops->ndo_fix_features(dev, features);
9823
9824 /* driver might be less strict about feature dependencies */
9825 features = netdev_fix_features(dev, features);
9826
9827 /* some features can't be enabled if they're off on an upper device */
9828 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9829 features = netdev_sync_upper_features(dev, upper, features);
9830
9831 if (dev->features == features)
9832 goto sync_lower;
9833
9834 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9835 &dev->features, &features);
9836
9837 if (dev->netdev_ops->ndo_set_features)
9838 err = dev->netdev_ops->ndo_set_features(dev, features);
9839 else
9840 err = 0;
9841
9842 if (unlikely(err < 0)) {
9843 netdev_err(dev,
9844 "set_features() failed (%d); wanted %pNF, left %pNF\n",
9845 err, &features, &dev->features);
9846 /* return non-0 since some features might have changed and
9847 * it's better to fire a spurious notification than miss it
9848 */
9849 return -1;
9850 }
9851
9852 sync_lower:
9853 /* some features must be disabled on lower devices when disabled
9854 * on an upper device (think: bonding master or bridge)
9855 */
9856 netdev_for_each_lower_dev(dev, lower, iter)
9857 netdev_sync_lower_features(dev, lower, features);
9858
9859 if (!err) {
9860 netdev_features_t diff = features ^ dev->features;
9861
9862 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9863 /* udp_tunnel_{get,drop}_rx_info both need
9864 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9865 * device, or they won't do anything.
9866 * Thus we need to update dev->features
9867 * *before* calling udp_tunnel_get_rx_info,
9868 * but *after* calling udp_tunnel_drop_rx_info.
9869 */
9870 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9871 dev->features = features;
9872 udp_tunnel_get_rx_info(dev);
9873 } else {
9874 udp_tunnel_drop_rx_info(dev);
9875 }
9876 }
9877
9878 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9879 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9880 dev->features = features;
9881 err |= vlan_get_rx_ctag_filter_info(dev);
9882 } else {
9883 vlan_drop_rx_ctag_filter_info(dev);
9884 }
9885 }
9886
9887 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9888 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9889 dev->features = features;
9890 err |= vlan_get_rx_stag_filter_info(dev);
9891 } else {
9892 vlan_drop_rx_stag_filter_info(dev);
9893 }
9894 }
9895
9896 dev->features = features;
9897 }
9898
9899 return err < 0 ? 0 : 1;
9900 }
9901
9902 /**
9903 * netdev_update_features - recalculate device features
9904 * @dev: the device to check
9905 *
9906 * Recalculate dev->features set and send notifications if it
9907 * has changed. Should be called after driver or hardware dependent
9908 * conditions might have changed that influence the features.
9909 */
9910 void netdev_update_features(struct net_device *dev)
9911 {
9912 if (__netdev_update_features(dev))
9913 netdev_features_change(dev);
9914 }
9915 EXPORT_SYMBOL(netdev_update_features);
9916
9917 /**
9918 * netdev_change_features - recalculate device features
9919 * @dev: the device to check
9920 *
9921 * Recalculate dev->features set and send notifications even
9922 * if they have not changed. Should be called instead of
9923 * netdev_update_features() if dev->vlan_features might also
9924 * have changed to allow the changes to be propagated to stacked
9925 * VLAN devices.
9926 */
9927 void netdev_change_features(struct net_device *dev)
9928 {
9929 __netdev_update_features(dev);
9930 netdev_features_change(dev);
9931 }
9932 EXPORT_SYMBOL(netdev_change_features);
9933
9934 /**
9935 * netif_stacked_transfer_operstate - transfer operstate
9936 * @rootdev: the root or lower level device to transfer state from
9937 * @dev: the device to transfer operstate to
9938 *
9939 * Transfer operational state from root to device. This is normally
9940 * called when a stacking relationship exists between the root
9941 * device and the device (a leaf device).
9942 */
9943 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9944 struct net_device *dev)
9945 {
9946 if (rootdev->operstate == IF_OPER_DORMANT)
9947 netif_dormant_on(dev);
9948 else
9949 netif_dormant_off(dev);
9950
9951 if (rootdev->operstate == IF_OPER_TESTING)
9952 netif_testing_on(dev);
9953 else
9954 netif_testing_off(dev);
9955
9956 if (netif_carrier_ok(rootdev))
9957 netif_carrier_on(dev);
9958 else
9959 netif_carrier_off(dev);
9960 }
9961 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9962
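/* Allocate dev->_rx and register one xdp_rxq_info per RX queue; on
 * failure, unregister the queues set up so far and free the array.
 */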
9963 static int netif_alloc_rx_queues(struct net_device *dev)
9964 {
9965 unsigned int i, count = dev->num_rx_queues;
9966 struct netdev_rx_queue *rx;
9967 size_t sz = count * sizeof(*rx);
9968 int err = 0;
9969
9970 BUG_ON(count < 1);
9971
9972 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9973 if (!rx)
9974 return -ENOMEM;
9975
9976 dev->_rx = rx;
9977
9978 for (i = 0; i < count; i++) {
9979 rx[i].dev = dev;
9980
9981 /* XDP RX-queue setup */
9982 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9983 if (err < 0)
9984 goto err_rxq_info;
9985 }
9986 return 0;
9987
9988 err_rxq_info:
9989 /* Rollback successful reg's and free other resources */
9990 while (i--)
9991 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9992 kvfree(dev->_rx);
9993 dev->_rx = NULL;
9994 return err;
9995 }
9996
9997 static void netif_free_rx_queues(struct net_device *dev)
9998 {
9999 unsigned int i, count = dev->num_rx_queues;
10000
10001 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10002 if (!dev->_rx)
10003 return;
10004
10005 for (i = 0; i < count; i++)
10006 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10007
10008 kvfree(dev->_rx);
10009 }
10010
10011 static void netdev_init_one_queue(struct net_device *dev,
10012 struct netdev_queue *queue, void *_unused)
10013 {
10014 /* Initialize queue lock */
10015 spin_lock_init(&queue->_xmit_lock);
10016 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10017 queue->xmit_lock_owner = -1;
10018 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10019 queue->dev = dev;
10020 #ifdef CONFIG_BQL
10021 dql_init(&queue->dql, HZ);
10022 #endif
10023 }
10024
10025 static void netif_free_tx_queues(struct net_device *dev)
10026 {
10027 kvfree(dev->_tx);
10028 }
10029
10030 static int netif_alloc_netdev_queues(struct net_device *dev)
10031 {
10032 unsigned int count = dev->num_tx_queues;
10033 struct netdev_queue *tx;
10034 size_t sz = count * sizeof(*tx);
10035
10036 if (count < 1 || count > 0xffff)
10037 return -EINVAL;
10038
10039 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10040 if (!tx)
10041 return -ENOMEM;
10042
10043 dev->_tx = tx;
10044
10045 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10046 spin_lock_init(&dev->tx_global_lock);
10047
10048 return 0;
10049 }
10050
10051 void netif_tx_stop_all_queues(struct net_device *dev)
10052 {
10053 unsigned int i;
10054
10055 for (i = 0; i < dev->num_tx_queues; i++) {
10056 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10057
10058 netif_tx_stop_queue(txq);
10059 }
10060 }
10061 EXPORT_SYMBOL(netif_tx_stop_all_queues);
10062
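/* Allocate the per-CPU statistics block selected by dev->pcpu_stat_type
 * (lstats, tstats or dstats), or nothing for NETDEV_PCPU_STAT_NONE.
 */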
10063 static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10064 {
10065 void __percpu *v;
10066
10067 /* Drivers implementing ndo_get_peer_dev must support tstat
10068 * accounting, so that skb_do_redirect() can bump the dev's
10069 * RX stats upon network namespace switch.
10070 */
10071 if (dev->netdev_ops->ndo_get_peer_dev &&
10072 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10073 return -EOPNOTSUPP;
10074
10075 switch (dev->pcpu_stat_type) {
10076 case NETDEV_PCPU_STAT_NONE:
10077 return 0;
10078 case NETDEV_PCPU_STAT_LSTATS:
10079 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10080 break;
10081 case NETDEV_PCPU_STAT_TSTATS:
10082 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10083 break;
10084 case NETDEV_PCPU_STAT_DSTATS:
10085 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10086 break;
10087 default:
10088 return -EINVAL;
10089 }
10090
10091 return v ? 0 : -ENOMEM;
10092 }
10093
10094 static void netdev_do_free_pcpu_stats(struct net_device *dev)
10095 {
10096 switch (dev->pcpu_stat_type) {
10097 case NETDEV_PCPU_STAT_NONE:
10098 return;
10099 case NETDEV_PCPU_STAT_LSTATS:
10100 free_percpu(dev->lstats);
10101 break;
10102 case NETDEV_PCPU_STAT_TSTATS:
10103 free_percpu(dev->tstats);
10104 break;
10105 case NETDEV_PCPU_STAT_DSTATS:
10106 free_percpu(dev->dstats);
10107 break;
10108 }
10109 }
10110
10111 /**
10112 * register_netdevice() - register a network device
10113 * @dev: device to register
10114 *
10115 * Take a prepared network device structure and make it externally accessible.
10116 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10117 * Callers must hold the rtnl lock - you may want register_netdev()
10118 * instead of this.
10119 */
10120 int register_netdevice(struct net_device *dev)
10121 {
10122 int ret;
10123 struct net *net = dev_net(dev);
10124
10125 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10126 NETDEV_FEATURE_COUNT);
10127 BUG_ON(dev_boot_phase);
10128 ASSERT_RTNL();
10129
10130 might_sleep();
10131
10132 /* When net_devices are persistent, this will be fatal. */
10133 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10134 BUG_ON(!net);
10135
10136 ret = ethtool_check_ops(dev->ethtool_ops);
10137 if (ret)
10138 return ret;
10139
10140 spin_lock_init(&dev->addr_list_lock);
10141 netdev_set_addr_lockdep_class(dev);
10142
10143 ret = dev_get_valid_name(net, dev, dev->name);
10144 if (ret < 0)
10145 goto out;
10146
10147 ret = -ENOMEM;
10148 dev->name_node = netdev_name_node_head_alloc(dev);
10149 if (!dev->name_node)
10150 goto out;
10151
10152 /* Init, if this function is available */
10153 if (dev->netdev_ops->ndo_init) {
10154 ret = dev->netdev_ops->ndo_init(dev);
10155 if (ret) {
10156 if (ret > 0)
10157 ret = -EIO;
10158 goto err_free_name;
10159 }
10160 }
10161
10162 if (((dev->hw_features | dev->features) &
10163 NETIF_F_HW_VLAN_CTAG_FILTER) &&
10164 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10165 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10166 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10167 ret = -EINVAL;
10168 goto err_uninit;
10169 }
10170
10171 ret = netdev_do_alloc_pcpu_stats(dev);
10172 if (ret)
10173 goto err_uninit;
10174
10175 ret = dev_index_reserve(net, dev->ifindex);
10176 if (ret < 0)
10177 goto err_free_pcpu;
10178 dev->ifindex = ret;
10179
10180 /* Transfer changeable features to wanted_features and enable
10181 * software offloads (GSO and GRO).
10182 */
10183 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10184 dev->features |= NETIF_F_SOFT_FEATURES;
10185
10186 if (dev->udp_tunnel_nic_info) {
10187 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10188 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10189 }
10190
10191 dev->wanted_features = dev->features & dev->hw_features;
10192
10193 if (!(dev->flags & IFF_LOOPBACK))
10194 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10195
10196 /* If IPv4 TCP segmentation offload is supported we should also
10197 * allow the device to enable segmenting the frame with the option
10198 * of ignoring a static IP ID value. This doesn't enable the
10199 * feature itself but allows the user to enable it later.
10200 */
10201 if (dev->hw_features & NETIF_F_TSO)
10202 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10203 if (dev->vlan_features & NETIF_F_TSO)
10204 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10205 if (dev->mpls_features & NETIF_F_TSO)
10206 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10207 if (dev->hw_enc_features & NETIF_F_TSO)
10208 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10209
10210 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10211 */
10212 dev->vlan_features |= NETIF_F_HIGHDMA;
10213
10214 /* Make NETIF_F_SG inheritable to tunnel devices.
10215 */
10216 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10217
10218 /* Make NETIF_F_SG inheritable to MPLS.
10219 */
10220 dev->mpls_features |= NETIF_F_SG;
10221
10222 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10223 ret = notifier_to_errno(ret);
10224 if (ret)
10225 goto err_ifindex_release;
10226
10227 ret = netdev_register_kobject(dev);
10228 write_lock(&dev_base_lock);
10229 dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10230 write_unlock(&dev_base_lock);
10231 if (ret)
10232 goto err_uninit_notify;
10233
10234 __netdev_update_features(dev);
10235
10236 /*
10237 * Default initial state at registration is that the
10238 * device is present.
10239 */
10240
10241 set_bit(__LINK_STATE_PRESENT, &dev->state);
10242
10243 linkwatch_init_dev(dev);
10244
10245 dev_init_scheduler(dev);
10246
10247 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10248 list_netdevice(dev);
10249
10250 add_device_randomness(dev->dev_addr, dev->addr_len);
10251
10252 /* If the device has a permanent device address, the driver should
10253 * set dev_addr and also addr_assign_type should be set to
10254 * NET_ADDR_PERM (default value).
10255 */
10256 if (dev->addr_assign_type == NET_ADDR_PERM)
10257 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10258
10259 /* Notify protocols, that a new device appeared. */
10260 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10261 ret = notifier_to_errno(ret);
10262 if (ret) {
10263 /* Expect explicit free_netdev() on failure */
10264 dev->needs_free_netdev = false;
10265 unregister_netdevice_queue(dev, NULL);
10266 goto out;
10267 }
10268 /*
10269 * Prevent userspace races by waiting until the network
10270 * device is fully setup before sending notifications.
10271 */
10272 if (!dev->rtnl_link_ops ||
10273 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10274 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10275
10276 out:
10277 return ret;
10278
10279 err_uninit_notify:
10280 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10281 err_ifindex_release:
10282 dev_index_release(net, dev->ifindex);
10283 err_free_pcpu:
10284 netdev_do_free_pcpu_stats(dev);
10285 err_uninit:
10286 if (dev->netdev_ops->ndo_uninit)
10287 dev->netdev_ops->ndo_uninit(dev);
10288 if (dev->priv_destructor)
10289 dev->priv_destructor(dev);
10290 err_free_name:
10291 netdev_name_node_free(dev->name_node);
10292 goto out;
10293 }
10294 EXPORT_SYMBOL(register_netdevice);
10295
10296 /**
10297 * init_dummy_netdev - init a dummy network device for NAPI
10298 * @dev: device to init
10299 *
10300 * This takes a network device structure and initializes the minimum
10301 * amount of fields so it can be used to schedule NAPI polls without
10302 * registering a full blown interface. This is to be used by drivers
10303 * that need to tie several hardware interfaces to a single NAPI
10304 * poll scheduler due to HW limitations.
10305 */
10306 int init_dummy_netdev(struct net_device *dev)
10307 {
10308 /* Clear everything. Note we don't initialize spinlocks
10309 * as they aren't supposed to be taken by any of the
10310 * NAPI code and this dummy netdev is supposed to be
10311 * only ever used for NAPI polls
10312 */
10313 memset(dev, 0, sizeof(struct net_device));
10314
10315 /* make sure we BUG if trying to hit standard
10316 * register/unregister code path
10317 */
10318 dev->reg_state = NETREG_DUMMY;
10319
10320 /* NAPI wants this */
10321 INIT_LIST_HEAD(&dev->napi_list);
10322
10323 /* a dummy interface is started by default */
10324 set_bit(__LINK_STATE_PRESENT, &dev->state);
10325 set_bit(__LINK_STATE_START, &dev->state);
10326
10327 /* napi_busy_loop stats accounting wants this */
10328 dev_net_set(dev, &init_net);
10329
10330 /* Note: We don't allocate pcpu_refcnt for dummy devices,
10331 * because users of this 'device' don't need to change
10332 * its refcount.
10333 */
10334
10335 return 0;
10336 }
10337 EXPORT_SYMBOL_GPL(init_dummy_netdev);
10338
10339
10340 /**
10341 * register_netdev - register a network device
10342 * @dev: device to register
10343 *
10344 * Take a completed network device structure and add it to the kernel
10345 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10346 * chain. 0 is returned on success. A negative errno code is returned
10347 * on a failure to set up the device, or if the name is a duplicate.
10348 *
10349 * This is a wrapper around register_netdevice that takes the rtnl semaphore
10350 * and expands the device name if you passed a format string to
10351 * alloc_netdev.
10352 */
10353 int register_netdev(struct net_device *dev)
10354 {
10355 int err;
10356
10357 if (rtnl_lock_killable())
10358 return -EINTR;
10359 err = register_netdevice(dev);
10360 rtnl_unlock();
10361 return err;
10362 }
10363 EXPORT_SYMBOL(register_netdev);
10364
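/* Read the device reference count: the sum of the per-CPU counters when
 * CONFIG_PCPU_DEV_REFCNT is enabled, otherwise the plain refcount_t.
 */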
10365 int netdev_refcnt_read(const struct net_device *dev)
10366 {
10367 #ifdef CONFIG_PCPU_DEV_REFCNT
10368 int i, refcnt = 0;
10369
10370 for_each_possible_cpu(i)
10371 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10372 return refcnt;
10373 #else
10374 return refcount_read(&dev->dev_refcnt);
10375 #endif
10376 }
10377 EXPORT_SYMBOL(netdev_refcnt_read);
10378
10379 int netdev_unregister_timeout_secs __read_mostly = 10;
10380
10381 #define WAIT_REFS_MIN_MSECS 1
10382 #define WAIT_REFS_MAX_MSECS 250
10383 /**
10384 * netdev_wait_allrefs_any - wait until all references are gone.
10385 * @list: list of net_devices to wait on
10386 *
10387 * This is called when unregistering network devices.
10388 *
10389 * Any protocol or device that holds a reference should register
10390 * for netdevice notification, and cleanup and put back the
10391 * reference if they receive an UNREGISTER event.
10392 * We can get stuck here if buggy protocols don't correctly
10393 * call dev_put.
10394 */
10395 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10396 {
10397 unsigned long rebroadcast_time, warning_time;
10398 struct net_device *dev;
10399 int wait = 0;
10400
10401 rebroadcast_time = warning_time = jiffies;
10402
10403 list_for_each_entry(dev, list, todo_list)
10404 if (netdev_refcnt_read(dev) == 1)
10405 return dev;
10406
10407 while (true) {
10408 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10409 rtnl_lock();
10410
10411 /* Rebroadcast unregister notification */
10412 list_for_each_entry(dev, list, todo_list)
10413 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10414
10415 __rtnl_unlock();
10416 rcu_barrier();
10417 rtnl_lock();
10418
10419 list_for_each_entry(dev, list, todo_list)
10420 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10421 &dev->state)) {
10422 /* We must not have linkwatch events
10423 * pending on unregister. If this
10424 * happens, we simply run the queue
10425 * unscheduled, resulting in a noop
10426 * for this device.
10427 */
10428 linkwatch_run_queue();
10429 break;
10430 }
10431
10432 __rtnl_unlock();
10433
10434 rebroadcast_time = jiffies;
10435 }
10436
10437 rcu_barrier();
10438
10439 if (!wait) {
10440 wait = WAIT_REFS_MIN_MSECS;
10441 } else {
10442 msleep(wait);
10443 wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10444 }
10445
10446 list_for_each_entry(dev, list, todo_list)
10447 if (netdev_refcnt_read(dev) == 1)
10448 return dev;
10449
10450 if (time_after(jiffies, warning_time +
10451 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10452 list_for_each_entry(dev, list, todo_list) {
10453 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10454 dev->name, netdev_refcnt_read(dev));
10455 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10456 }
10457
10458 warning_time = jiffies;
10459 }
10460 }
10461 }
10462
10463 /* The sequence is:
10464 *
10465 * rtnl_lock();
10466 * ...
10467 * register_netdevice(x1);
10468 * register_netdevice(x2);
10469 * ...
10470 * unregister_netdevice(y1);
10471 * unregister_netdevice(y2);
10472 * ...
10473 * rtnl_unlock();
10474 * free_netdev(y1);
10475 * free_netdev(y2);
10476 *
10477 * We are invoked by rtnl_unlock().
10478 * This allows us to deal with problems:
10479 * 1) We can delete sysfs objects which invoke hotplug
10480 * without deadlocking with linkwatch via keventd.
10481 * 2) Since we run with the RTNL semaphore not held, we can sleep
10482 * safely in order to wait for the netdev refcnt to drop to zero.
10483 *
10484 * We must not return until all unregister events added during
10485 * the interval the lock was held have been completed.
10486 */
10487 void netdev_run_todo(void)
10488 {
10489 struct net_device *dev, *tmp;
10490 struct list_head list;
10491 #ifdef CONFIG_LOCKDEP
10492 struct list_head unlink_list;
10493
10494 list_replace_init(&net_unlink_list, &unlink_list);
10495
10496 while (!list_empty(&unlink_list)) {
10497 struct net_device *dev = list_first_entry(&unlink_list,
10498 struct net_device,
10499 unlink_list);
10500 list_del_init(&dev->unlink_list);
10501 dev->nested_level = dev->lower_level - 1;
10502 }
10503 #endif
10504
10505 /* Snapshot list, allow later requests */
10506 list_replace_init(&net_todo_list, &list);
10507
10508 __rtnl_unlock();
10509
10510 /* Wait for rcu callbacks to finish before next phase */
10511 if (!list_empty(&list))
10512 rcu_barrier();
10513
10514 list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10515 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10516 netdev_WARN(dev, "run_todo but not unregistering\n");
10517 list_del(&dev->todo_list);
10518 continue;
10519 }
10520
10521 write_lock(&dev_base_lock);
10522 dev->reg_state = NETREG_UNREGISTERED;
10523 write_unlock(&dev_base_lock);
10524 linkwatch_forget_dev(dev);
10525 }
10526
10527 while (!list_empty(&list)) {
10528 dev = netdev_wait_allrefs_any(&list);
10529 list_del(&dev->todo_list);
10530
10531 /* paranoia */
10532 BUG_ON(netdev_refcnt_read(dev) != 1);
10533 BUG_ON(!list_empty(&dev->ptype_all));
10534 BUG_ON(!list_empty(&dev->ptype_specific));
10535 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10536 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10537
10538 netdev_do_free_pcpu_stats(dev);
10539 if (dev->priv_destructor)
10540 dev->priv_destructor(dev);
10541 if (dev->needs_free_netdev)
10542 free_netdev(dev);
10543
10544 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10545 wake_up(&netdev_unregistering_wq);
10546
10547 /* Free network device */
10548 kobject_put(&dev->dev.kobj);
10549 }
10550 }
10551
10552 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10553 * all the same fields in the same order as net_device_stats, with only
10554 * the type differing, but rtnl_link_stats64 may have additional fields
10555 * at the end for newer counters.
10556 */
10557 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10558 const struct net_device_stats *netdev_stats)
10559 {
10560 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10561 const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10562 u64 *dst = (u64 *)stats64;
10563
10564 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10565 for (i = 0; i < n; i++)
10566 dst[i] = (unsigned long)atomic_long_read(&src[i]);
10567 /* zero out counters that only exist in rtnl_link_stats64 */
10568 memset((char *)stats64 + n * sizeof(u64), 0,
10569 sizeof(*stats64) - n * sizeof(u64));
10570 }
10571 EXPORT_SYMBOL(netdev_stats_to_stats64);
10572
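/* Lazily allocate dev->core_stats on first use. The cmpxchg() resolves a
 * race between concurrent allocators; the loser frees its copy and both
 * end up using the same per-CPU block.
 */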
10573 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
10574 {
10575 struct net_device_core_stats __percpu *p;
10576
10577 p = alloc_percpu_gfp(struct net_device_core_stats,
10578 GFP_ATOMIC | __GFP_NOWARN);
10579
10580 if (p && cmpxchg(&dev->core_stats, NULL, p))
10581 free_percpu(p);
10582
10583 /* This READ_ONCE() pairs with the cmpxchg() above */
10584 return READ_ONCE(dev->core_stats);
10585 }
10586 EXPORT_SYMBOL(netdev_core_stats_alloc);
10587
10588 /**
10589 * dev_get_stats - get network device statistics
10590 * @dev: device to get statistics from
10591 * @storage: place to store stats
10592 *
10593 * Get network statistics from device. Return @storage.
10594 * The device driver may provide its own method by setting
10595 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
10596 * otherwise the internal statistics structure is used.
10597 */
10598 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10599 struct rtnl_link_stats64 *storage)
10600 {
10601 const struct net_device_ops *ops = dev->netdev_ops;
10602 const struct net_device_core_stats __percpu *p;
10603
10604 if (ops->ndo_get_stats64) {
10605 memset(storage, 0, sizeof(*storage));
10606 ops->ndo_get_stats64(dev, storage);
10607 } else if (ops->ndo_get_stats) {
10608 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10609 } else {
10610 netdev_stats_to_stats64(storage, &dev->stats);
10611 }
10612
10613 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10614 p = READ_ONCE(dev->core_stats);
10615 if (p) {
10616 const struct net_device_core_stats *core_stats;
10617 int i;
10618
10619 for_each_possible_cpu(i) {
10620 core_stats = per_cpu_ptr(p, i);
10621 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10622 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10623 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10624 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10625 }
10626 }
10627 return storage;
10628 }
10629 EXPORT_SYMBOL(dev_get_stats);
10630
10631 /**
10632 * dev_fetch_sw_netstats - get per-cpu network device statistics
10633 * @s: place to store stats
10634 * @netstats: per-cpu network stats to read from
10635 *
10636 * Read per-cpu network statistics and populate the related fields in @s.
10637 */
10638 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10639 const struct pcpu_sw_netstats __percpu *netstats)
10640 {
10641 int cpu;
10642
10643 for_each_possible_cpu(cpu) {
10644 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10645 const struct pcpu_sw_netstats *stats;
10646 unsigned int start;
10647
10648 stats = per_cpu_ptr(netstats, cpu);
10649 do {
10650 start = u64_stats_fetch_begin(&stats->syncp);
10651 rx_packets = u64_stats_read(&stats->rx_packets);
10652 rx_bytes = u64_stats_read(&stats->rx_bytes);
10653 tx_packets = u64_stats_read(&stats->tx_packets);
10654 tx_bytes = u64_stats_read(&stats->tx_bytes);
10655 } while (u64_stats_fetch_retry(&stats->syncp, start));
10656
10657 s->rx_packets += rx_packets;
10658 s->rx_bytes += rx_bytes;
10659 s->tx_packets += tx_packets;
10660 s->tx_bytes += tx_bytes;
10661 }
10662 }
10663 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10664
10665 /**
10666 * dev_get_tstats64 - ndo_get_stats64 implementation
10667 * @dev: device to get statistics from
10668 * @s: place to store stats
10669 *
10670 * Populate @s from dev->stats and dev->tstats. Can be used as
10671 * ndo_get_stats64() callback.
10672 */
10673 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10674 {
10675 netdev_stats_to_stats64(s, &dev->stats);
10676 dev_fetch_sw_netstats(s, dev->tstats);
10677 }
10678 EXPORT_SYMBOL_GPL(dev_get_tstats64);
10679
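/* Return the device's ingress queue, creating it on demand when
 * CONFIG_NET_CLS_ACT is enabled; a fresh queue starts out pointing at
 * noop_qdisc.
 */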
10680 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10681 {
10682 struct netdev_queue *queue = dev_ingress_queue(dev);
10683
10684 #ifdef CONFIG_NET_CLS_ACT
10685 if (queue)
10686 return queue;
10687 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10688 if (!queue)
10689 return NULL;
10690 netdev_init_one_queue(dev, queue, NULL);
10691 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10692 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10693 rcu_assign_pointer(dev->ingress_queue, queue);
10694 #endif
10695 return queue;
10696 }
10697
10698 static const struct ethtool_ops default_ethtool_ops;
10699
10700 void netdev_set_default_ethtool_ops(struct net_device *dev,
10701 const struct ethtool_ops *ops)
10702 {
10703 if (dev->ethtool_ops == &default_ethtool_ops)
10704 dev->ethtool_ops = ops;
10705 }
10706 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10707
10708 /**
10709 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10710 * @dev: netdev to enable the IRQ coalescing on
10711 *
10712 * Sets a conservative default for SW IRQ coalescing. Users can use
10713 * sysfs attributes to override the default values.
10714 */
10715 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10716 {
10717 WARN_ON(dev->reg_state == NETREG_REGISTERED);
10718
10719 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
10720 dev->gro_flush_timeout = 20000;
10721 dev->napi_defer_hard_irqs = 1;
10722 }
10723 }
10724 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
10725
10726 void netdev_freemem(struct net_device *dev)
10727 {
10728 char *addr = (char *)dev - dev->padded;
10729
10730 kvfree(addr);
10731 }
10732
10733 /**
10734 * alloc_netdev_mqs - allocate network device
10735 * @sizeof_priv: size of private data to allocate space for
10736 * @name: device name format string
10737 * @name_assign_type: origin of device name
10738 * @setup: callback to initialize device
10739 * @txqs: the number of TX subqueues to allocate
10740 * @rxqs: the number of RX subqueues to allocate
10741 *
10742 * Allocates a struct net_device with private data area for driver use
10743 * and performs basic initialization. Also allocates subqueue structs
10744 * for each queue on the device.
10745 */
10746 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10747 unsigned char name_assign_type,
10748 void (*setup)(struct net_device *),
10749 unsigned int txqs, unsigned int rxqs)
10750 {
10751 struct net_device *dev;
10752 unsigned int alloc_size;
10753 struct net_device *p;
10754
10755 BUG_ON(strlen(name) >= sizeof(dev->name));
10756
10757 if (txqs < 1) {
10758 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10759 return NULL;
10760 }
10761
10762 if (rxqs < 1) {
10763 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10764 return NULL;
10765 }
10766
10767 alloc_size = sizeof(struct net_device);
10768 if (sizeof_priv) {
10769 /* ensure 32-byte alignment of private area */
10770 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10771 alloc_size += sizeof_priv;
10772 }
10773 /* ensure 32-byte alignment of whole construct */
10774 alloc_size += NETDEV_ALIGN - 1;
10775
10776 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10777 if (!p)
10778 return NULL;
10779
10780 dev = PTR_ALIGN(p, NETDEV_ALIGN);
10781 dev->padded = (char *)dev - (char *)p;
10782
10783 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10784 #ifdef CONFIG_PCPU_DEV_REFCNT
10785 dev->pcpu_refcnt = alloc_percpu(int);
10786 if (!dev->pcpu_refcnt)
10787 goto free_dev;
10788 __dev_hold(dev);
10789 #else
10790 refcount_set(&dev->dev_refcnt, 1);
10791 #endif
10792
10793 if (dev_addr_init(dev))
10794 goto free_pcpu;
10795
10796 dev_mc_init(dev);
10797 dev_uc_init(dev);
10798
10799 dev_net_set(dev, &init_net);
10800
10801 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10802 dev->xdp_zc_max_segs = 1;
10803 dev->gso_max_segs = GSO_MAX_SEGS;
10804 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10805 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10806 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10807 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10808 dev->tso_max_segs = TSO_MAX_SEGS;
10809 dev->upper_level = 1;
10810 dev->lower_level = 1;
10811 #ifdef CONFIG_LOCKDEP
10812 dev->nested_level = 0;
10813 INIT_LIST_HEAD(&dev->unlink_list);
10814 #endif
10815
10816 INIT_LIST_HEAD(&dev->napi_list);
10817 INIT_LIST_HEAD(&dev->unreg_list);
10818 INIT_LIST_HEAD(&dev->close_list);
10819 INIT_LIST_HEAD(&dev->link_watch_list);
10820 INIT_LIST_HEAD(&dev->adj_list.upper);
10821 INIT_LIST_HEAD(&dev->adj_list.lower);
10822 INIT_LIST_HEAD(&dev->ptype_all);
10823 INIT_LIST_HEAD(&dev->ptype_specific);
10824 INIT_LIST_HEAD(&dev->net_notifier_list);
10825 #ifdef CONFIG_NET_SCHED
10826 hash_init(dev->qdisc_hash);
10827 #endif
10828 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10829 setup(dev);
10830
10831 if (!dev->tx_queue_len) {
10832 dev->priv_flags |= IFF_NO_QUEUE;
10833 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10834 }
10835
10836 dev->num_tx_queues = txqs;
10837 dev->real_num_tx_queues = txqs;
10838 if (netif_alloc_netdev_queues(dev))
10839 goto free_all;
10840
10841 dev->num_rx_queues = rxqs;
10842 dev->real_num_rx_queues = rxqs;
10843 if (netif_alloc_rx_queues(dev))
10844 goto free_all;
10845
10846 strcpy(dev->name, name);
10847 dev->name_assign_type = name_assign_type;
10848 dev->group = INIT_NETDEV_GROUP;
10849 if (!dev->ethtool_ops)
10850 dev->ethtool_ops = &default_ethtool_ops;
10851
10852 nf_hook_netdev_init(dev);
10853
10854 return dev;
10855
10856 free_all:
10857 free_netdev(dev);
10858 return NULL;
10859
10860 free_pcpu:
10861 #ifdef CONFIG_PCPU_DEV_REFCNT
10862 free_percpu(dev->pcpu_refcnt);
10863 free_dev:
10864 #endif
10865 netdev_freemem(dev);
10866 return NULL;
10867 }
10868 EXPORT_SYMBOL(alloc_netdev_mqs);
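/* Usage sketch, hypothetical names throughout: allocating an Ethernet-style
 * device with four TX and four RX queues plus driver-private data:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	// ... set netdev_ops, features, then register_netdev(dev) ...
 *
 * Most Ethernet drivers reach this function indirectly through the
 * alloc_etherdev*() wrappers rather than calling it themselves.
 */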
10869
10870 /**
10871 * free_netdev - free network device
10872 * @dev: device
10873 *
10874 * This function does the last stage of destroying an allocated device
10875 * interface. The reference to the device object is released. If this
10876  * is the last reference then it will be freed. Must be called in process
10877 * context.
10878 */
10879 void free_netdev(struct net_device *dev)
10880 {
10881 struct napi_struct *p, *n;
10882
10883 might_sleep();
10884
10885 	/* When called immediately after register_netdevice() failed, the unwind
10886 * handling may still be dismantling the device. Handle that case by
10887 * deferring the free.
10888 */
10889 if (dev->reg_state == NETREG_UNREGISTERING) {
10890 ASSERT_RTNL();
10891 dev->needs_free_netdev = true;
10892 return;
10893 }
10894
10895 netif_free_tx_queues(dev);
10896 netif_free_rx_queues(dev);
10897
10898 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10899
10900 /* Flush device addresses */
10901 dev_addr_flush(dev);
10902
10903 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10904 netif_napi_del(p);
10905
10906 ref_tracker_dir_exit(&dev->refcnt_tracker);
10907 #ifdef CONFIG_PCPU_DEV_REFCNT
10908 free_percpu(dev->pcpu_refcnt);
10909 dev->pcpu_refcnt = NULL;
10910 #endif
10911 free_percpu(dev->core_stats);
10912 dev->core_stats = NULL;
10913 free_percpu(dev->xdp_bulkq);
10914 dev->xdp_bulkq = NULL;
10915
10916 /* Compatibility with error handling in drivers */
10917 if (dev->reg_state == NETREG_UNINITIALIZED) {
10918 netdev_freemem(dev);
10919 return;
10920 }
10921
10922 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10923 dev->reg_state = NETREG_RELEASED;
10924
10925 /* will free via device release */
10926 put_device(&dev->dev);
10927 }
10928 EXPORT_SYMBOL(free_netdev);
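/* Usage sketch of the usual error-unwind pairing in a probe path;
 * free_netdev() releases what alloc_netdev_mqs()/alloc_etherdev() set up
 * when registration never happened or has already been torn down:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);	// device was never visible to the stack
 *		return err;
 *	}
 *
 * "struct foo_priv" is again a hypothetical private-data struct.
 */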
10929
10930 /**
10931 * synchronize_net - Synchronize with packet receive processing
10932 *
10933 * Wait for packets currently being received to be done.
10934 * Does not block later packets from starting.
10935 */
10936 void synchronize_net(void)
10937 {
10938 might_sleep();
10939 if (rtnl_is_locked())
10940 synchronize_rcu_expedited();
10941 else
10942 synchronize_rcu();
10943 }
10944 EXPORT_SYMBOL(synchronize_net);
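/* Usage sketch with a hypothetical "foo" pointer: the common pattern is to
 * unpublish an RCU-protected pointer used on the receive path, wait for
 * in-flight readers, then free the old object:
 *
 *	old = rtnl_dereference(dev->foo);	// "foo" is a made-up __rcu field
 *	RCU_INIT_POINTER(dev->foo, NULL);
 *	synchronize_net();			// no receiver can still see "old"
 *	kfree(old);
 */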
10945
10946 /**
10947 * unregister_netdevice_queue - remove device from the kernel
10948 * @dev: device
10949 * @head: list
10950 *
10951 * This function shuts down a device interface and removes it
10952 * from the kernel tables.
10953  * If @head is not NULL, the device is queued to be unregistered later.
10954 *
10955 * Callers must hold the rtnl semaphore. You may want
10956 * unregister_netdev() instead of this.
10957 */
10958
10959 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10960 {
10961 ASSERT_RTNL();
10962
10963 if (head) {
10964 list_move_tail(&dev->unreg_list, head);
10965 } else {
10966 LIST_HEAD(single);
10967
10968 list_add(&dev->unreg_list, &single);
10969 unregister_netdevice_many(&single);
10970 }
10971 }
10972 EXPORT_SYMBOL(unregister_netdevice_queue);
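/* Usage sketch, hypothetical device list: batching several unregistrations
 * so the expensive synchronization in unregister_netdevice_many() is paid
 * once for the whole group rather than once per device:
 *
 *	LIST_HEAD(kill_list);
 *
 *	ASSERT_RTNL();
 *	list_for_each_entry(dev, &foo_dev_list, foo_list)	// made-up list
 *		unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */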
10973
10974 void unregister_netdevice_many_notify(struct list_head *head,
10975 u32 portid, const struct nlmsghdr *nlh)
10976 {
10977 struct net_device *dev, *tmp;
10978 LIST_HEAD(close_head);
10979
10980 BUG_ON(dev_boot_phase);
10981 ASSERT_RTNL();
10982
10983 if (list_empty(head))
10984 return;
10985
10986 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10987 		/* Some devices call this without ever having been
10988 		 * registered, as part of their initialization unwind.
10989 		 * Remove those devices and proceed with the remaining ones.
10990 		 */
10991 if (dev->reg_state == NETREG_UNINITIALIZED) {
10992 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10993 dev->name, dev);
10994
10995 WARN_ON(1);
10996 list_del(&dev->unreg_list);
10997 continue;
10998 }
10999 dev->dismantle = true;
11000 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11001 }
11002
11003 /* If device is running, close it first. */
11004 list_for_each_entry(dev, head, unreg_list)
11005 list_add_tail(&dev->close_list, &close_head);
11006 dev_close_many(&close_head, true);
11007
11008 list_for_each_entry(dev, head, unreg_list) {
11009 /* And unlink it from device chain. */
11010 write_lock(&dev_base_lock);
11011 unlist_netdevice(dev, false);
11012 dev->reg_state = NETREG_UNREGISTERING;
11013 write_unlock(&dev_base_lock);
11014 }
11015 flush_all_backlogs();
11016
11017 synchronize_net();
11018
11019 list_for_each_entry(dev, head, unreg_list) {
11020 struct sk_buff *skb = NULL;
11021
11022 /* Shutdown queueing discipline. */
11023 dev_shutdown(dev);
11024 dev_tcx_uninstall(dev);
11025 dev_xdp_uninstall(dev);
11026 bpf_dev_bound_netdev_unregister(dev);
11027
11028 netdev_offload_xstats_disable_all(dev);
11029
11030 /* Notify protocols, that we are about to destroy
11031 * this device. They should clean all the things.
11032 */
11033 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11034
11035 if (!dev->rtnl_link_ops ||
11036 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11037 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11038 GFP_KERNEL, NULL, 0,
11039 portid, nlh);
11040
11041 /*
11042 * Flush the unicast and multicast chains
11043 */
11044 dev_uc_flush(dev);
11045 dev_mc_flush(dev);
11046
11047 netdev_name_node_alt_flush(dev);
11048 netdev_name_node_free(dev->name_node);
11049
11050 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11051
11052 if (dev->netdev_ops->ndo_uninit)
11053 dev->netdev_ops->ndo_uninit(dev);
11054
11055 if (skb)
11056 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11057
11058 		/* The notifier chain MUST detach all upper devices from us. */
11059 WARN_ON(netdev_has_any_upper_dev(dev));
11060 WARN_ON(netdev_has_any_lower_dev(dev));
11061
11062 /* Remove entries from kobject tree */
11063 netdev_unregister_kobject(dev);
11064 #ifdef CONFIG_XPS
11065 /* Remove XPS queueing entries */
11066 netif_reset_xps_queues_gt(dev, 0);
11067 #endif
11068 }
11069
11070 synchronize_net();
11071
11072 list_for_each_entry(dev, head, unreg_list) {
11073 netdev_put(dev, &dev->dev_registered_tracker);
11074 net_set_todo(dev);
11075 }
11076
11077 list_del(head);
11078 }
11079
11080 /**
11081 * unregister_netdevice_many - unregister many devices
11082 * @head: list of devices
11083 *
11084  * Note: As most callers use a stack-allocated list_head,
11085  * we force a list_del() to make sure the stack won't be corrupted later.
11086 */
11087 void unregister_netdevice_many(struct list_head *head)
11088 {
11089 unregister_netdevice_many_notify(head, 0, NULL);
11090 }
11091 EXPORT_SYMBOL(unregister_netdevice_many);
11092
11093 /**
11094 * unregister_netdev - remove device from the kernel
11095 * @dev: device
11096 *
11097 * This function shuts down a device interface and removes it
11098 * from the kernel tables.
11099 *
11100 * This is just a wrapper for unregister_netdevice that takes
11101 * the rtnl semaphore. In general you want to use this and not
11102 * unregister_netdevice.
11103 */
11104 void unregister_netdev(struct net_device *dev)
11105 {
11106 rtnl_lock();
11107 unregister_netdevice(dev);
11108 rtnl_unlock();
11109 }
11110 EXPORT_SYMBOL(unregister_netdev);
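/* Usage sketch: the common driver teardown order, with free_netdev() only
 * after the unregister has fully completed:
 *
 *	unregister_netdev(dev);		// takes and drops the rtnl lock itself
 *	free_netdev(dev);
 */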
11111
11112 /**
11113  * __dev_change_net_namespace - move device to a different network namespace
11114 * @dev: device
11115 * @net: network namespace
11116 * @pat: If not NULL name pattern to try if the current device name
11117 * is already taken in the destination network namespace.
11118 * @new_ifindex: If not zero, specifies device index in the target
11119 * namespace.
11120 *
11121 * This function shuts down a device interface and moves it
11122 * to a new network namespace. On success 0 is returned, on
11123  * a failure a negative errno code is returned.
11124 *
11125 * Callers must hold the rtnl semaphore.
11126 */
11127
11128 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11129 const char *pat, int new_ifindex)
11130 {
11131 struct netdev_name_node *name_node;
11132 struct net *net_old = dev_net(dev);
11133 char new_name[IFNAMSIZ] = {};
11134 int err, new_nsid;
11135
11136 ASSERT_RTNL();
11137
11138 /* Don't allow namespace local devices to be moved. */
11139 err = -EINVAL;
11140 if (dev->features & NETIF_F_NETNS_LOCAL)
11141 goto out;
11142
11143 	/* Ensure the device has been registered */
11144 if (dev->reg_state != NETREG_REGISTERED)
11145 goto out;
11146
11147 	/* Get out if there is nothing to do */
11148 err = 0;
11149 if (net_eq(net_old, net))
11150 goto out;
11151
11152 /* Pick the destination device name, and ensure
11153 * we can use it in the destination network namespace.
11154 */
11155 err = -EEXIST;
11156 if (netdev_name_in_use(net, dev->name)) {
11157 /* We get here if we can't use the current device name */
11158 if (!pat)
11159 goto out;
11160 err = dev_prep_valid_name(net, dev, pat, new_name);
11161 if (err < 0)
11162 goto out;
11163 }
11164 /* Check that none of the altnames conflicts. */
11165 err = -EEXIST;
11166 netdev_for_each_altname(dev, name_node)
11167 if (netdev_name_in_use(net, name_node->name))
11168 goto out;
11169
11170 /* Check that new_ifindex isn't used yet. */
11171 if (new_ifindex) {
11172 err = dev_index_reserve(net, new_ifindex);
11173 if (err < 0)
11174 goto out;
11175 } else {
11176 /* If there is an ifindex conflict assign a new one */
11177 err = dev_index_reserve(net, dev->ifindex);
11178 if (err == -EBUSY)
11179 err = dev_index_reserve(net, 0);
11180 if (err < 0)
11181 goto out;
11182 new_ifindex = err;
11183 }
11184
11185 /*
11186 	 * And now a mini version of register_netdevice() and unregister_netdevice().
11187 */
11188
11189 /* If device is running close it first. */
11190 dev_close(dev);
11191
11192 /* And unlink it from device chain */
11193 unlist_netdevice(dev, true);
11194
11195 synchronize_net();
11196
11197 /* Shutdown queueing discipline. */
11198 dev_shutdown(dev);
11199
11200 /* Notify protocols, that we are about to destroy
11201 * this device. They should clean all the things.
11202 *
11203 * Note that dev->reg_state stays at NETREG_REGISTERED.
11204 * This is wanted because this way 8021q and macvlan know
11205 * the device is just moving and can keep their slaves up.
11206 */
11207 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11208 rcu_barrier();
11209
11210 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11211
11212 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11213 new_ifindex);
11214
11215 /*
11216 * Flush the unicast and multicast chains
11217 */
11218 dev_uc_flush(dev);
11219 dev_mc_flush(dev);
11220
11221 /* Send a netdev-removed uevent to the old namespace */
11222 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11223 netdev_adjacent_del_links(dev);
11224
11225 /* Move per-net netdevice notifiers that are following the netdevice */
11226 move_netdevice_notifiers_dev_net(dev, net);
11227
11228 /* Actually switch the network namespace */
11229 dev_net_set(dev, net);
11230 dev->ifindex = new_ifindex;
11231
11232 /* Send a netdev-add uevent to the new namespace */
11233 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11234 netdev_adjacent_add_links(dev);
11235
11236 if (new_name[0]) /* Rename the netdev to prepared name */
11237 strscpy(dev->name, new_name, IFNAMSIZ);
11238
11239 /* Fixup kobjects */
11240 err = device_rename(&dev->dev, dev->name);
11241 WARN_ON(err);
11242
11243 /* Adapt owner in case owning user namespace of target network
11244 * namespace is different from the original one.
11245 */
11246 err = netdev_change_owner(dev, net_old, net);
11247 WARN_ON(err);
11248
11249 /* Add the device back in the hashes */
11250 list_netdevice(dev);
11251
11252 /* Notify protocols, that a new device appeared. */
11253 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11254
11255 /*
11256 * Prevent userspace races by waiting until the network
11257 * device is fully setup before sending notifications.
11258 */
11259 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11260
11261 synchronize_net();
11262 err = 0;
11263 out:
11264 return err;
11265 }
11266 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
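/* Usage sketch: callers hold the rtnl lock around the move; passing 0 as
 * @new_ifindex lets the core keep or pick an index, and "eth%d" is only an
 * example fallback name pattern:
 *
 *	rtnl_lock();
 *	err = __dev_change_net_namespace(dev, peer_net, "eth%d", 0);
 *	rtnl_unlock();
 *
 * "peer_net" stands for a struct net the caller already holds; most users
 * go through the dev_change_net_namespace() wrapper instead.
 */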
11267
11268 static int dev_cpu_dead(unsigned int oldcpu)
11269 {
11270 struct sk_buff **list_skb;
11271 struct sk_buff *skb;
11272 unsigned int cpu;
11273 struct softnet_data *sd, *oldsd, *remsd = NULL;
11274
11275 local_irq_disable();
11276 cpu = smp_processor_id();
11277 sd = &per_cpu(softnet_data, cpu);
11278 oldsd = &per_cpu(softnet_data, oldcpu);
11279
11280 /* Find end of our completion_queue. */
11281 list_skb = &sd->completion_queue;
11282 while (*list_skb)
11283 list_skb = &(*list_skb)->next;
11284 /* Append completion queue from offline CPU. */
11285 *list_skb = oldsd->completion_queue;
11286 oldsd->completion_queue = NULL;
11287
11288 /* Append output queue from offline CPU. */
11289 if (oldsd->output_queue) {
11290 *sd->output_queue_tailp = oldsd->output_queue;
11291 sd->output_queue_tailp = oldsd->output_queue_tailp;
11292 oldsd->output_queue = NULL;
11293 oldsd->output_queue_tailp = &oldsd->output_queue;
11294 }
11295 	/* Append NAPI poll list from offline CPU, with one exception:
11296 * process_backlog() must be called by cpu owning percpu backlog.
11297 * We properly handle process_queue & input_pkt_queue later.
11298 */
11299 while (!list_empty(&oldsd->poll_list)) {
11300 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11301 struct napi_struct,
11302 poll_list);
11303
11304 list_del_init(&napi->poll_list);
11305 if (napi->poll == process_backlog)
11306 napi->state = 0;
11307 else
11308 ____napi_schedule(sd, napi);
11309 }
11310
11311 raise_softirq_irqoff(NET_TX_SOFTIRQ);
11312 local_irq_enable();
11313
11314 #ifdef CONFIG_RPS
11315 remsd = oldsd->rps_ipi_list;
11316 oldsd->rps_ipi_list = NULL;
11317 #endif
11318 /* send out pending IPI's on offline CPU */
11319 net_rps_send_ipi(remsd);
11320
11321 /* Process offline CPU's input_pkt_queue */
11322 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11323 netif_rx(skb);
11324 input_queue_head_incr(oldsd);
11325 }
11326 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11327 netif_rx(skb);
11328 input_queue_head_incr(oldsd);
11329 }
11330
11331 return 0;
11332 }
11333
11334 /**
11335 * netdev_increment_features - increment feature set by one
11336 * @all: current feature set
11337 * @one: new feature set
11338 * @mask: mask feature set
11339 *
11340 * Computes a new feature set after adding a device with feature set
11341 * @one to the master device with current feature set @all. Will not
11342 * enable anything that is off in @mask. Returns the new feature set.
11343 */
11344 netdev_features_t netdev_increment_features(netdev_features_t all,
11345 netdev_features_t one, netdev_features_t mask)
11346 {
11347 if (mask & NETIF_F_HW_CSUM)
11348 mask |= NETIF_F_CSUM_MASK;
11349 mask |= NETIF_F_VLAN_CHALLENGED;
11350
11351 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11352 all &= one | ~NETIF_F_ALL_FOR_ALL;
11353
11354 /* If one device supports hw checksumming, set for all. */
11355 if (all & NETIF_F_HW_CSUM)
11356 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11357
11358 return all;
11359 }
11360 EXPORT_SYMBOL(netdev_increment_features);
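/* Usage sketch, hypothetical aggregation loop: a master device such as a
 * bond or bridge typically recomputes its feature set by folding in each
 * lower device with this helper:
 *
 *	netdev_features_t features = FOO_MASTER_INITIAL_FEATURES;	// made up
 *
 *	list_for_each_entry(entry, &lower_list, list)			// made up
 *		features = netdev_increment_features(features,
 *						     entry->dev->features,
 *						     FOO_MASTER_FEATURE_MASK);
 */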
11361
11362 static struct hlist_head * __net_init netdev_create_hash(void)
11363 {
11364 int i;
11365 struct hlist_head *hash;
11366
11367 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11368 if (hash != NULL)
11369 for (i = 0; i < NETDEV_HASHENTRIES; i++)
11370 INIT_HLIST_HEAD(&hash[i]);
11371
11372 return hash;
11373 }
11374
11375 /* Initialize per network namespace state */
11376 static int __net_init netdev_init(struct net *net)
11377 {
11378 BUILD_BUG_ON(GRO_HASH_BUCKETS >
11379 8 * sizeof_field(struct napi_struct, gro_bitmask));
11380
11381 INIT_LIST_HEAD(&net->dev_base_head);
11382
11383 net->dev_name_head = netdev_create_hash();
11384 if (net->dev_name_head == NULL)
11385 goto err_name;
11386
11387 net->dev_index_head = netdev_create_hash();
11388 if (net->dev_index_head == NULL)
11389 goto err_idx;
11390
11391 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
11392
11393 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11394
11395 return 0;
11396
11397 err_idx:
11398 kfree(net->dev_name_head);
11399 err_name:
11400 return -ENOMEM;
11401 }
11402
11403 /**
11404 * netdev_drivername - network driver for the device
11405 * @dev: network device
11406 *
11407 * Determine network driver for device.
11408 */
11409 const char *netdev_drivername(const struct net_device *dev)
11410 {
11411 const struct device_driver *driver;
11412 const struct device *parent;
11413 const char *empty = "";
11414
11415 parent = dev->dev.parent;
11416 if (!parent)
11417 return empty;
11418
11419 driver = parent->driver;
11420 if (driver && driver->name)
11421 return driver->name;
11422 return empty;
11423 }
11424
11425 static void __netdev_printk(const char *level, const struct net_device *dev,
11426 struct va_format *vaf)
11427 {
11428 if (dev && dev->dev.parent) {
11429 dev_printk_emit(level[1] - '0',
11430 dev->dev.parent,
11431 "%s %s %s%s: %pV",
11432 dev_driver_string(dev->dev.parent),
11433 dev_name(dev->dev.parent),
11434 netdev_name(dev), netdev_reg_state(dev),
11435 vaf);
11436 } else if (dev) {
11437 printk("%s%s%s: %pV",
11438 level, netdev_name(dev), netdev_reg_state(dev), vaf);
11439 } else {
11440 printk("%s(NULL net_device): %pV", level, vaf);
11441 }
11442 }
11443
11444 void netdev_printk(const char *level, const struct net_device *dev,
11445 const char *format, ...)
11446 {
11447 struct va_format vaf;
11448 va_list args;
11449
11450 va_start(args, format);
11451
11452 vaf.fmt = format;
11453 vaf.va = &args;
11454
11455 __netdev_printk(level, dev, &vaf);
11456
11457 va_end(args);
11458 }
11459 EXPORT_SYMBOL(netdev_printk);
11460
11461 #define define_netdev_printk_level(func, level) \
11462 void func(const struct net_device *dev, const char *fmt, ...) \
11463 { \
11464 struct va_format vaf; \
11465 va_list args; \
11466 \
11467 va_start(args, fmt); \
11468 \
11469 vaf.fmt = fmt; \
11470 vaf.va = &args; \
11471 \
11472 __netdev_printk(level, dev, &vaf); \
11473 \
11474 va_end(args); \
11475 } \
11476 EXPORT_SYMBOL(func);
11477
11478 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11479 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11480 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11481 define_netdev_printk_level(netdev_err, KERN_ERR);
11482 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11483 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11484 define_netdev_printk_level(netdev_info, KERN_INFO);
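/* Usage sketch: these helpers are the preferred way to log with device
 * context, since the parent driver and device names are prefixed
 * automatically by __netdev_printk():
 *
 *	netdev_warn(dev, "link down, retrying in %u ms\n", foo_delay_ms);
 *	netdev_err(dev, "DMA mapping failed\n");
 *
 * "foo_delay_ms" is a placeholder variable used only for illustration.
 */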
11485
11486 static void __net_exit netdev_exit(struct net *net)
11487 {
11488 kfree(net->dev_name_head);
11489 kfree(net->dev_index_head);
11490 xa_destroy(&net->dev_by_index);
11491 if (net != &init_net)
11492 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11493 }
11494
11495 static struct pernet_operations __net_initdata netdev_net_ops = {
11496 .init = netdev_init,
11497 .exit = netdev_exit,
11498 };
11499
11500 static void __net_exit default_device_exit_net(struct net *net)
11501 {
11502 struct netdev_name_node *name_node, *tmp;
11503 struct net_device *dev, *aux;
11504 /*
11505 * Push all migratable network devices back to the
11506 * initial network namespace
11507 */
11508 ASSERT_RTNL();
11509 for_each_netdev_safe(net, dev, aux) {
11510 int err;
11511 char fb_name[IFNAMSIZ];
11512
11513 		/* Ignore unmovable devices (e.g. loopback) */
11514 if (dev->features & NETIF_F_NETNS_LOCAL)
11515 continue;
11516
11517 /* Leave virtual devices for the generic cleanup */
11518 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11519 continue;
11520
11521 /* Push remaining network devices to init_net */
11522 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11523 if (netdev_name_in_use(&init_net, fb_name))
11524 snprintf(fb_name, IFNAMSIZ, "dev%%d");
11525
11526 netdev_for_each_altname_safe(dev, name_node, tmp)
11527 if (netdev_name_in_use(&init_net, name_node->name)) {
11528 netdev_name_node_del(name_node);
11529 synchronize_rcu();
11530 __netdev_name_node_alt_destroy(name_node);
11531 }
11532
11533 err = dev_change_net_namespace(dev, &init_net, fb_name);
11534 if (err) {
11535 pr_emerg("%s: failed to move %s to init_net: %d\n",
11536 __func__, dev->name, err);
11537 BUG();
11538 }
11539 }
11540 }
11541
11542 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11543 {
11544 	/* At exit, all network devices must be removed from a network
11545 * namespace. Do this in the reverse order of registration.
11546 * Do this across as many network namespaces as possible to
11547 * improve batching efficiency.
11548 */
11549 struct net_device *dev;
11550 struct net *net;
11551 LIST_HEAD(dev_kill_list);
11552
11553 rtnl_lock();
11554 list_for_each_entry(net, net_list, exit_list) {
11555 default_device_exit_net(net);
11556 cond_resched();
11557 }
11558
11559 list_for_each_entry(net, net_list, exit_list) {
11560 for_each_netdev_reverse(net, dev) {
11561 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11562 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11563 else
11564 unregister_netdevice_queue(dev, &dev_kill_list);
11565 }
11566 }
11567 unregister_netdevice_many(&dev_kill_list);
11568 rtnl_unlock();
11569 }
11570
11571 static struct pernet_operations __net_initdata default_device_ops = {
11572 .exit_batch = default_device_exit_batch,
11573 };
11574
11575 /*
11576 * Initialize the DEV module. At boot time this walks the device list and
11577 * unhooks any devices that fail to initialise (normally hardware not
11578 * present) and leaves us with a valid list of present and active devices.
11579 *
11580 */
11581
11582 /*
11583 * This is called single threaded during boot, so no need
11584 * to take the rtnl semaphore.
11585 */
11586 static int __init net_dev_init(void)
11587 {
11588 int i, rc = -ENOMEM;
11589
11590 BUG_ON(!dev_boot_phase);
11591
11592 if (dev_proc_init())
11593 goto out;
11594
11595 if (netdev_kobject_init())
11596 goto out;
11597
11598 INIT_LIST_HEAD(&ptype_all);
11599 for (i = 0; i < PTYPE_HASH_SIZE; i++)
11600 INIT_LIST_HEAD(&ptype_base[i]);
11601
11602 if (register_pernet_subsys(&netdev_net_ops))
11603 goto out;
11604
11605 /*
11606 * Initialise the packet receive queues.
11607 */
11608
11609 for_each_possible_cpu(i) {
11610 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11611 struct softnet_data *sd = &per_cpu(softnet_data, i);
11612
11613 INIT_WORK(flush, flush_backlog);
11614
11615 skb_queue_head_init(&sd->input_pkt_queue);
11616 skb_queue_head_init(&sd->process_queue);
11617 #ifdef CONFIG_XFRM_OFFLOAD
11618 skb_queue_head_init(&sd->xfrm_backlog);
11619 #endif
11620 INIT_LIST_HEAD(&sd->poll_list);
11621 sd->output_queue_tailp = &sd->output_queue;
11622 #ifdef CONFIG_RPS
11623 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11624 sd->cpu = i;
11625 #endif
11626 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11627 spin_lock_init(&sd->defer_lock);
11628
11629 init_gro_hash(&sd->backlog);
11630 sd->backlog.poll = process_backlog;
11631 sd->backlog.weight = weight_p;
11632 }
11633
11634 dev_boot_phase = 0;
11635
11636 	/* The loopback device is special: if any other network device
11637 	 * is present in a network namespace, the loopback device must
11638 	 * be present too. Since we now dynamically allocate and free the
11639 	 * loopback device, ensure this invariant is maintained by
11640 	 * keeping the loopback device the first device on the
11641 	 * list of network devices, ensuring the loopback device
11642 	 * is the first device that appears and the last network device
11643 	 * that disappears.
11644 */
11645 if (register_pernet_device(&loopback_net_ops))
11646 goto out;
11647
11648 if (register_pernet_device(&default_device_ops))
11649 goto out;
11650
11651 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11652 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11653
11654 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11655 NULL, dev_cpu_dead);
11656 WARN_ON(rc < 0);
11657 rc = 0;
11658 out:
11659 return rc;
11660 }
11661
11662 subsys_initcall(net_dev_init);
11663