xref: /openbmc/linux/net/core/dev.c (revision cd4d09ec)
1 /*
2  * 	NET3	Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non IP parts of dev.c 1.0.19
10  * 		Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *              			to 2 if register_netdev gets called
25  *              			before net_dev_init & also removed a
26  *              			few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *		Alan Cox	: 	Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *              			indefinitely on dev->refcnt
71  * 		J Hadi Salim	:	- Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dst.h>
103 #include <net/dst_metadata.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/module.h>
110 #include <linux/netpoll.h>
111 #include <linux/rcupdate.h>
112 #include <linux/delay.h>
113 #include <net/iw_handler.h>
114 #include <asm/current.h>
115 #include <linux/audit.h>
116 #include <linux/dmaengine.h>
117 #include <linux/err.h>
118 #include <linux/ctype.h>
119 #include <linux/if_arp.h>
120 #include <linux/if_vlan.h>
121 #include <linux/ip.h>
122 #include <net/ip.h>
123 #include <net/mpls.h>
124 #include <linux/ipv6.h>
125 #include <linux/in.h>
126 #include <linux/jhash.h>
127 #include <linux/random.h>
128 #include <trace/events/napi.h>
129 #include <trace/events/net.h>
130 #include <trace/events/skb.h>
131 #include <linux/pci.h>
132 #include <linux/inetdevice.h>
133 #include <linux/cpu_rmap.h>
134 #include <linux/static_key.h>
135 #include <linux/hashtable.h>
136 #include <linux/vmalloc.h>
137 #include <linux/if_macvlan.h>
138 #include <linux/errqueue.h>
139 #include <linux/hrtimer.h>
140 #include <linux/netfilter_ingress.h>
141 #include <linux/sctp.h>
142 
143 #include "net-sysfs.h"
144 
145 /* Instead of increasing this, you should create a hash table. */
146 #define MAX_GRO_SKBS 8
147 
148 /* This should be increased if a protocol with a bigger head is added. */
149 #define GRO_MAX_HEAD (MAX_HEADER + 128)
150 
151 static DEFINE_SPINLOCK(ptype_lock);
152 static DEFINE_SPINLOCK(offload_lock);
153 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
154 struct list_head ptype_all __read_mostly;	/* Taps */
155 static struct list_head offload_base __read_mostly;
156 
157 static int netif_rx_internal(struct sk_buff *skb);
158 static int call_netdevice_notifiers_info(unsigned long val,
159 					 struct net_device *dev,
160 					 struct netdev_notifier_info *info);
161 
162 /*
163  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
164  * semaphore.
165  *
166  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
167  *
168  * Writers must hold the rtnl semaphore while they loop through the
169  * dev_base_head list, and hold dev_base_lock for writing when they do the
170  * actual updates.  This allows pure readers to access the list even
171  * while a writer is preparing to update it.
172  *
173  * To put it another way, dev_base_lock is held for writing only to
174  * protect against pure readers; the rtnl semaphore provides the
175  * protection against other writers.
176  *
177  * For example usage, see register_netdevice() and
178  * unregister_netdevice(), which must be called with the rtnl
179  * semaphore held.
180  */
181 DEFINE_RWLOCK(dev_base_lock);
182 EXPORT_SYMBOL(dev_base_lock);
183 
184 /* protects napi_hash addition/deletion and napi_gen_id */
185 static DEFINE_SPINLOCK(napi_hash_lock);
186 
187 static unsigned int napi_gen_id = NR_CPUS;
188 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
189 
190 static seqcount_t devnet_rename_seq;
191 
192 static inline void dev_base_seq_inc(struct net *net)
193 {
194 	while (++net->dev_base_seq == 0);
195 }
196 
197 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
198 {
199 	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
200 
201 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
202 }
203 
204 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
205 {
206 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
207 }
208 
209 static inline void rps_lock(struct softnet_data *sd)
210 {
211 #ifdef CONFIG_RPS
212 	spin_lock(&sd->input_pkt_queue.lock);
213 #endif
214 }
215 
216 static inline void rps_unlock(struct softnet_data *sd)
217 {
218 #ifdef CONFIG_RPS
219 	spin_unlock(&sd->input_pkt_queue.lock);
220 #endif
221 }
222 
223 /* Device list insertion */
224 static void list_netdevice(struct net_device *dev)
225 {
226 	struct net *net = dev_net(dev);
227 
228 	ASSERT_RTNL();
229 
230 	write_lock_bh(&dev_base_lock);
231 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
232 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
233 	hlist_add_head_rcu(&dev->index_hlist,
234 			   dev_index_hash(net, dev->ifindex));
235 	write_unlock_bh(&dev_base_lock);
236 
237 	dev_base_seq_inc(net);
238 }
239 
240 /* Device list removal
241  * caller must respect a RCU grace period before freeing/reusing dev
242  */
243 static void unlist_netdevice(struct net_device *dev)
244 {
245 	ASSERT_RTNL();
246 
247 	/* Unlink dev from the device chain */
248 	write_lock_bh(&dev_base_lock);
249 	list_del_rcu(&dev->dev_list);
250 	hlist_del_rcu(&dev->name_hlist);
251 	hlist_del_rcu(&dev->index_hlist);
252 	write_unlock_bh(&dev_base_lock);
253 
254 	dev_base_seq_inc(dev_net(dev));
255 }
256 
257 /*
258  *	Our notifier list
259  */
260 
261 static RAW_NOTIFIER_HEAD(netdev_chain);
262 
263 /*
264  *	Device drivers call our routines to queue packets here. We empty the
265  *	queue in the local softnet handler.
266  */
267 
268 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
269 EXPORT_PER_CPU_SYMBOL(softnet_data);
270 
271 #ifdef CONFIG_LOCKDEP
272 /*
273  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
274  * according to dev->type
275  */
276 static const unsigned short netdev_lock_type[] =
277 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
278 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
279 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
280 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
281 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
282 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
283 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
284 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
285 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
286 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
287 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
288 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
289 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
290 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
291 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
292 
293 static const char *const netdev_lock_name[] =
294 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
295 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
296 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
297 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
298 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
299 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
300 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
301 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
302 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
303 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
304 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
305 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
306 	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
307 	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
308 	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
309 
310 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
311 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
312 
313 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
314 {
315 	int i;
316 
317 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
318 		if (netdev_lock_type[i] == dev_type)
319 			return i;
320 	/* the last key is used by default */
321 	return ARRAY_SIZE(netdev_lock_type) - 1;
322 }
323 
324 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
325 						 unsigned short dev_type)
326 {
327 	int i;
328 
329 	i = netdev_lock_pos(dev_type);
330 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
331 				   netdev_lock_name[i]);
332 }
333 
334 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
335 {
336 	int i;
337 
338 	i = netdev_lock_pos(dev->type);
339 	lockdep_set_class_and_name(&dev->addr_list_lock,
340 				   &netdev_addr_lock_key[i],
341 				   netdev_lock_name[i]);
342 }
343 #else
344 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
345 						 unsigned short dev_type)
346 {
347 }
348 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
349 {
350 }
351 #endif
352 
353 /*******************************************************************************
354 
355 		Protocol management and registration routines
356 
357 *******************************************************************************/
358 
359 /*
360  *	Add a protocol ID to the list. Now that the input handler is
361  *	smarter we can dispense with all the messy stuff that used to be
362  *	here.
363  *
364  *	BEWARE!!! Protocol handlers that mangle input packets
365  *	MUST BE last in the hash buckets, and protocol handlers that
366  *	only inspect packets MUST start from the promiscuous ptype_all
367  *	chain in net_bh. This holds today; do not change it.
368  *	Explanation: if a handler that mangles packets were first on
369  *	the list, it could not sense that the packet is cloned and
370  *	should be copied-on-write, so it would modify the packet and
371  *	subsequent readers would see a broken packet.
372  *							--ANK (980803)
373  */
374 
375 static inline struct list_head *ptype_head(const struct packet_type *pt)
376 {
377 	if (pt->type == htons(ETH_P_ALL))
378 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
379 	else
380 		return pt->dev ? &pt->dev->ptype_specific :
381 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
382 }
383 
384 /**
385  *	dev_add_pack - add packet handler
386  *	@pt: packet type declaration
387  *
388  *	Add a protocol handler to the networking stack. The passed &packet_type
389  *	is linked into kernel lists and may not be freed until it has been
390  *	removed from the kernel lists.
391  *
392  *	This call does not sleep, therefore it cannot
393  *	guarantee that all CPUs which are in the middle of receiving
394  *	packets will see the new packet type (until the next received packet).
395  */
396 
397 void dev_add_pack(struct packet_type *pt)
398 {
399 	struct list_head *head = ptype_head(pt);
400 
401 	spin_lock(&ptype_lock);
402 	list_add_rcu(&pt->list, head);
403 	spin_unlock(&ptype_lock);
404 }
405 EXPORT_SYMBOL(dev_add_pack);
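
/*
 * Example (illustrative sketch only, not part of this file): a module that
 * taps every received frame by registering an ETH_P_ALL packet_type with
 * dev_add_pack() and removing it again with dev_remove_pack().  The
 * sample_* names are hypothetical.
 */
#if 0	/* sample usage, not compiled */
static int sample_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns this skb reference and must always consume it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type sample_tap __read_mostly = {
	.type	= cpu_to_be16(ETH_P_ALL),
	.func	= sample_tap_rcv,	/* .dev left NULL: tap all devices */
};

static int __init sample_tap_init(void)
{
	dev_add_pack(&sample_tap);
	return 0;
}

static void __exit sample_tap_exit(void)
{
	dev_remove_pack(&sample_tap);	/* sleeps; &sample_tap reusable after */
}
#endif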
406 
407 /**
408  *	__dev_remove_pack	 - remove packet handler
409  *	@pt: packet type declaration
410  *
411  *	Remove a protocol handler that was previously added to the kernel
412  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
413  *	from the kernel lists and can be freed or reused once this function
414  *	returns.
415  *
416  *      The packet type might still be in use by receivers
417  *	and must not be freed until after all the CPUs have gone
418  *	through a quiescent state.
419  */
420 void __dev_remove_pack(struct packet_type *pt)
421 {
422 	struct list_head *head = ptype_head(pt);
423 	struct packet_type *pt1;
424 
425 	spin_lock(&ptype_lock);
426 
427 	list_for_each_entry(pt1, head, list) {
428 		if (pt == pt1) {
429 			list_del_rcu(&pt->list);
430 			goto out;
431 		}
432 	}
433 
434 	pr_warn("dev_remove_pack: %p not found\n", pt);
435 out:
436 	spin_unlock(&ptype_lock);
437 }
438 EXPORT_SYMBOL(__dev_remove_pack);
439 
440 /**
441  *	dev_remove_pack	 - remove packet handler
442  *	@pt: packet type declaration
443  *
444  *	Remove a protocol handler that was previously added to the kernel
445  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
446  *	from the kernel lists and can be freed or reused once this function
447  *	returns.
448  *
449  *	This call sleeps to guarantee that no CPU is looking at the packet
450  *	type after return.
451  */
452 void dev_remove_pack(struct packet_type *pt)
453 {
454 	__dev_remove_pack(pt);
455 
456 	synchronize_net();
457 }
458 EXPORT_SYMBOL(dev_remove_pack);
459 
460 
461 /**
462  *	dev_add_offload - register offload handlers
463  *	@po: protocol offload declaration
464  *
465  *	Add protocol offload handlers to the networking stack. The passed
466  *	&proto_offload is linked into kernel lists and may not be freed until
467  *	it has been removed from the kernel lists.
468  *
469  *	This call does not sleep, therefore it cannot
470  *	guarantee that all CPUs which are in the middle of receiving
471  *	packets will see the new offload handlers (until the next received packet).
472  */
473 void dev_add_offload(struct packet_offload *po)
474 {
475 	struct packet_offload *elem;
476 
477 	spin_lock(&offload_lock);
478 	list_for_each_entry(elem, &offload_base, list) {
479 		if (po->priority < elem->priority)
480 			break;
481 	}
482 	list_add_rcu(&po->list, elem->list.prev);
483 	spin_unlock(&offload_lock);
484 }
485 EXPORT_SYMBOL(dev_add_offload);
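
/*
 * Example (illustrative sketch only): registering offload callbacks for a
 * protocol with dev_add_offload().  The structure layout follows the real
 * inet/ipv6 offload registrations; the sample_* name is hypothetical and
 * the callbacks are left as placeholders.
 */
#if 0	/* sample usage, not compiled */
static struct packet_offload sample_offload __read_mostly = {
	.type		= cpu_to_be16(ETH_P_IP),
	.priority	= 0,	/* lower value sorts earlier in offload_base */
	.callbacks	= {
		/* .gso_segment / .gro_receive / .gro_complete go here */
	},
};

static int __init sample_offload_init(void)
{
	dev_add_offload(&sample_offload);
	return 0;
}
#endif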
486 
487 /**
488  *	__dev_remove_offload	 - remove offload handler
489  *	@po: packet offload declaration
490  *
491  *	Remove a protocol offload handler that was previously added to the
492  *	kernel offload handlers by dev_add_offload(). The passed &offload_type
493  *	is removed from the kernel lists and can be freed or reused once this
494  *	function returns.
495  *
496  *      The packet type might still be in use by receivers
497  *	and must not be freed until after all the CPUs have gone
498  *	through a quiescent state.
499  */
500 static void __dev_remove_offload(struct packet_offload *po)
501 {
502 	struct list_head *head = &offload_base;
503 	struct packet_offload *po1;
504 
505 	spin_lock(&offload_lock);
506 
507 	list_for_each_entry(po1, head, list) {
508 		if (po == po1) {
509 			list_del_rcu(&po->list);
510 			goto out;
511 		}
512 	}
513 
514 	pr_warn("dev_remove_offload: %p not found\n", po);
515 out:
516 	spin_unlock(&offload_lock);
517 }
518 
519 /**
520  *	dev_remove_offload	 - remove packet offload handler
521  *	@po: packet offload declaration
522  *
523  *	Remove a packet offload handler that was previously added to the kernel
524  *	offload handlers by dev_add_offload(). The passed &offload_type is
525  *	removed from the kernel lists and can be freed or reused once this
526  *	function returns.
527  *
528  *	This call sleeps to guarantee that no CPU is looking at the packet
529  *	type after return.
530  */
531 void dev_remove_offload(struct packet_offload *po)
532 {
533 	__dev_remove_offload(po);
534 
535 	synchronize_net();
536 }
537 EXPORT_SYMBOL(dev_remove_offload);
538 
539 /******************************************************************************
540 
541 		      Device Boot-time Settings Routines
542 
543 *******************************************************************************/
544 
545 /* Boot time configuration table */
546 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
547 
548 /**
549  *	netdev_boot_setup_add	- add new setup entry
550  *	@name: name of the device
551  *	@map: configured settings for the device
552  *
553  *	Adds a new setup entry to the dev_boot_setup list.  The function
554  *	returns 0 on error and 1 on success.  This is a generic routine for
555  *	all netdevices.
556  */
557 static int netdev_boot_setup_add(char *name, struct ifmap *map)
558 {
559 	struct netdev_boot_setup *s;
560 	int i;
561 
562 	s = dev_boot_setup;
563 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
564 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
565 			memset(s[i].name, 0, sizeof(s[i].name));
566 			strlcpy(s[i].name, name, IFNAMSIZ);
567 			memcpy(&s[i].map, map, sizeof(s[i].map));
568 			break;
569 		}
570 	}
571 
572 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
573 }
574 
575 /**
576  *	netdev_boot_setup_check	- check boot time settings
577  *	@dev: the netdevice
578  *
579  * 	Check boot time settings for the device.
580  *	Any settings that are found are applied to the device to be used
581  *	later during device probing.
582  *	Returns 0 if no settings were found, 1 if they were.
583  */
584 int netdev_boot_setup_check(struct net_device *dev)
585 {
586 	struct netdev_boot_setup *s = dev_boot_setup;
587 	int i;
588 
589 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
590 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
591 		    !strcmp(dev->name, s[i].name)) {
592 			dev->irq 	= s[i].map.irq;
593 			dev->base_addr 	= s[i].map.base_addr;
594 			dev->mem_start 	= s[i].map.mem_start;
595 			dev->mem_end 	= s[i].map.mem_end;
596 			return 1;
597 		}
598 	}
599 	return 0;
600 }
601 EXPORT_SYMBOL(netdev_boot_setup_check);
602 
603 
604 /**
605  *	netdev_boot_base	- get address from boot time settings
606  *	@prefix: prefix for network device
607  *	@unit: id for network device
608  *
609  * 	Check boot time settings for the base address of the device.
610  *	Any settings that are found are applied to the device to be used
611  *	later during device probing.
612  *	Returns 0 if no settings were found.
613  */
614 unsigned long netdev_boot_base(const char *prefix, int unit)
615 {
616 	const struct netdev_boot_setup *s = dev_boot_setup;
617 	char name[IFNAMSIZ];
618 	int i;
619 
620 	sprintf(name, "%s%d", prefix, unit);
621 
622 	/*
623 	 * If device already registered then return base of 1
624 	 * to indicate not to probe for this interface
625 	 */
626 	if (__dev_get_by_name(&init_net, name))
627 		return 1;
628 
629 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
630 		if (!strcmp(name, s[i].name))
631 			return s[i].map.base_addr;
632 	return 0;
633 }
634 
635 /*
636  * Saves the settings configured at boot time for any netdevice.
637  */
638 int __init netdev_boot_setup(char *str)
639 {
640 	int ints[5];
641 	struct ifmap map;
642 
643 	str = get_options(str, ARRAY_SIZE(ints), ints);
644 	if (!str || !*str)
645 		return 0;
646 
647 	/* Save settings */
648 	memset(&map, 0, sizeof(map));
649 	if (ints[0] > 0)
650 		map.irq = ints[1];
651 	if (ints[0] > 1)
652 		map.base_addr = ints[2];
653 	if (ints[0] > 2)
654 		map.mem_start = ints[3];
655 	if (ints[0] > 3)
656 		map.mem_end = ints[4];
657 
658 	/* Add new entry to the list */
659 	return netdev_boot_setup_add(str, &map);
660 }
661 
662 __setup("netdev=", netdev_boot_setup);
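
/*
 * Example (illustrative): the parameter parsed above takes up to four
 * integers followed by the interface name, e.g.
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * which records irq=9, base_addr=0x300, mem_start=0 and mem_end=0 for
 * "eth0" so that netdev_boot_setup_check() can apply them when the
 * device is probed.
 */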
663 
664 /*******************************************************************************
665 
666 			    Device Interface Subroutines
667 
668 *******************************************************************************/
669 
670 /**
671  *	dev_get_iflink	- get 'iflink' value of an interface
672  *	@dev: targeted interface
673  *
674  *	Indicates the ifindex the interface is linked to.
675  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
676  */
677 
678 int dev_get_iflink(const struct net_device *dev)
679 {
680 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
681 		return dev->netdev_ops->ndo_get_iflink(dev);
682 
683 	return dev->ifindex;
684 }
685 EXPORT_SYMBOL(dev_get_iflink);
686 
687 /**
688  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
689  *	@dev: targeted interface
690  *	@skb: The packet.
691  *
692  *	For better visibility of tunnel traffic, OVS needs to retrieve
693  *	egress tunnel information for a packet. The following API allows
694  *	the user to get this info.
695  */
696 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
697 {
698 	struct ip_tunnel_info *info;
699 
700 	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
701 		return -EINVAL;
702 
703 	info = skb_tunnel_info_unclone(skb);
704 	if (!info)
705 		return -ENOMEM;
706 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
707 		return -EINVAL;
708 
709 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
710 }
711 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
712 
713 /**
714  *	__dev_get_by_name	- find a device by its name
715  *	@net: the applicable net namespace
716  *	@name: name to find
717  *
718  *	Find an interface by name. Must be called under RTNL semaphore
719  *	or @dev_base_lock. If the name is found a pointer to the device
720  *	is returned. If the name is not found then %NULL is returned. The
721  *	reference counters are not incremented so the caller must be
722  *	careful with locks.
723  */
724 
725 struct net_device *__dev_get_by_name(struct net *net, const char *name)
726 {
727 	struct net_device *dev;
728 	struct hlist_head *head = dev_name_hash(net, name);
729 
730 	hlist_for_each_entry(dev, head, name_hlist)
731 		if (!strncmp(dev->name, name, IFNAMSIZ))
732 			return dev;
733 
734 	return NULL;
735 }
736 EXPORT_SYMBOL(__dev_get_by_name);
737 
738 /**
739  *	dev_get_by_name_rcu	- find a device by its name
740  *	@net: the applicable net namespace
741  *	@name: name to find
742  *
743  *	Find an interface by name.
744  *	If the name is found a pointer to the device is returned.
745  * 	If the name is not found then %NULL is returned.
746  *	The reference counters are not incremented so the caller must be
747  *	careful with locks. The caller must hold RCU lock.
748  */
749 
750 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
751 {
752 	struct net_device *dev;
753 	struct hlist_head *head = dev_name_hash(net, name);
754 
755 	hlist_for_each_entry_rcu(dev, head, name_hlist)
756 		if (!strncmp(dev->name, name, IFNAMSIZ))
757 			return dev;
758 
759 	return NULL;
760 }
761 EXPORT_SYMBOL(dev_get_by_name_rcu);
762 
763 /**
764  *	dev_get_by_name		- find a device by its name
765  *	@net: the applicable net namespace
766  *	@name: name to find
767  *
768  *	Find an interface by name. This can be called from any
769  *	context and does its own locking. The returned handle has
770  *	the usage count incremented and the caller must use dev_put() to
771  *	release it when it is no longer needed. %NULL is returned if no
772  *	matching device is found.
773  */
774 
775 struct net_device *dev_get_by_name(struct net *net, const char *name)
776 {
777 	struct net_device *dev;
778 
779 	rcu_read_lock();
780 	dev = dev_get_by_name_rcu(net, name);
781 	if (dev)
782 		dev_hold(dev);
783 	rcu_read_unlock();
784 	return dev;
785 }
786 EXPORT_SYMBOL(dev_get_by_name);
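
/*
 * Example (illustrative sketch only): looking up a device by name from
 * process context.  dev_get_by_name() takes a reference, so the caller
 * must drop it with dev_put() when finished; "eth0" is a placeholder.
 */
#if 0	/* sample usage, not compiled */
	struct net_device *dev;

	dev = dev_get_by_name(&init_net, "eth0");
	if (dev) {
		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
		dev_put(dev);
	}
#endif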
787 
788 /**
789  *	__dev_get_by_index - find a device by its ifindex
790  *	@net: the applicable net namespace
791  *	@ifindex: index of device
792  *
793  *	Search for an interface by index. Returns %NULL if the device
794  *	is not found or a pointer to the device. The device has not
795  *	had its reference counter increased so the caller must be careful
796  *	about locking. The caller must hold either the RTNL semaphore
797  *	or @dev_base_lock.
798  */
799 
800 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
801 {
802 	struct net_device *dev;
803 	struct hlist_head *head = dev_index_hash(net, ifindex);
804 
805 	hlist_for_each_entry(dev, head, index_hlist)
806 		if (dev->ifindex == ifindex)
807 			return dev;
808 
809 	return NULL;
810 }
811 EXPORT_SYMBOL(__dev_get_by_index);
812 
813 /**
814  *	dev_get_by_index_rcu - find a device by its ifindex
815  *	@net: the applicable net namespace
816  *	@ifindex: index of device
817  *
818  *	Search for an interface by index. Returns %NULL if the device
819  *	is not found or a pointer to the device. The device has not
820  *	had its reference counter increased so the caller must be careful
821  *	about locking. The caller must hold RCU lock.
822  */
823 
824 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
825 {
826 	struct net_device *dev;
827 	struct hlist_head *head = dev_index_hash(net, ifindex);
828 
829 	hlist_for_each_entry_rcu(dev, head, index_hlist)
830 		if (dev->ifindex == ifindex)
831 			return dev;
832 
833 	return NULL;
834 }
835 EXPORT_SYMBOL(dev_get_by_index_rcu);
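
/*
 * Example (illustrative sketch only): the lockless lookup.  No reference
 * is taken, so the device pointer may only be used inside the RCU
 * read-side critical section.
 */
#if 0	/* sample usage, not compiled */
	struct net_device *dev;
	int ifindex = 1;	/* placeholder index */

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		pr_info("ifindex %d is %s\n", ifindex, dev->name);
	rcu_read_unlock();
#endif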
836 
837 
838 /**
839  *	dev_get_by_index - find a device by its ifindex
840  *	@net: the applicable net namespace
841  *	@ifindex: index of device
842  *
843  *	Search for an interface by index. Returns NULL if the device
844  *	is not found or a pointer to the device. The device returned has
845  *	had a reference added and the pointer is safe until the user calls
846  *	dev_put to indicate they have finished with it.
847  */
848 
849 struct net_device *dev_get_by_index(struct net *net, int ifindex)
850 {
851 	struct net_device *dev;
852 
853 	rcu_read_lock();
854 	dev = dev_get_by_index_rcu(net, ifindex);
855 	if (dev)
856 		dev_hold(dev);
857 	rcu_read_unlock();
858 	return dev;
859 }
860 EXPORT_SYMBOL(dev_get_by_index);
861 
862 /**
863  *	netdev_get_name - get a netdevice name, knowing its ifindex.
864  *	@net: network namespace
865  *	@name: a pointer to the buffer where the name will be stored.
866  *	@ifindex: the ifindex of the interface to get the name from.
867  *
868  *	The use of raw_seqcount_begin() and cond_resched() before
869  *	retrying is required as we want to give the writers a chance
870  *	to complete when CONFIG_PREEMPT is not set.
871  */
872 int netdev_get_name(struct net *net, char *name, int ifindex)
873 {
874 	struct net_device *dev;
875 	unsigned int seq;
876 
877 retry:
878 	seq = raw_seqcount_begin(&devnet_rename_seq);
879 	rcu_read_lock();
880 	dev = dev_get_by_index_rcu(net, ifindex);
881 	if (!dev) {
882 		rcu_read_unlock();
883 		return -ENODEV;
884 	}
885 
886 	strcpy(name, dev->name);
887 	rcu_read_unlock();
888 	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
889 		cond_resched();
890 		goto retry;
891 	}
892 
893 	return 0;
894 }
895 
896 /**
897  *	dev_getbyhwaddr_rcu - find a device by its hardware address
898  *	@net: the applicable net namespace
899  *	@type: media type of device
900  *	@ha: hardware address
901  *
902  *	Search for an interface by MAC address. Returns NULL if the device
903  *	is not found or a pointer to the device.
904  *	The caller must hold RCU or RTNL.
905  *	The returned device has not had its ref count increased
906  *	and the caller must therefore be careful about locking
907  *
908  */
909 
910 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
911 				       const char *ha)
912 {
913 	struct net_device *dev;
914 
915 	for_each_netdev_rcu(net, dev)
916 		if (dev->type == type &&
917 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
918 			return dev;
919 
920 	return NULL;
921 }
922 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
923 
924 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
925 {
926 	struct net_device *dev;
927 
928 	ASSERT_RTNL();
929 	for_each_netdev(net, dev)
930 		if (dev->type == type)
931 			return dev;
932 
933 	return NULL;
934 }
935 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
936 
937 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
938 {
939 	struct net_device *dev, *ret = NULL;
940 
941 	rcu_read_lock();
942 	for_each_netdev_rcu(net, dev)
943 		if (dev->type == type) {
944 			dev_hold(dev);
945 			ret = dev;
946 			break;
947 		}
948 	rcu_read_unlock();
949 	return ret;
950 }
951 EXPORT_SYMBOL(dev_getfirstbyhwtype);
952 
953 /**
954  *	__dev_get_by_flags - find any device with given flags
955  *	@net: the applicable net namespace
956  *	@if_flags: IFF_* values
957  *	@mask: bitmask of bits in if_flags to check
958  *
959  *	Search for any interface with the given flags. Returns NULL if a device
960  *	is not found or a pointer to the device. Must be called inside
961  *	rtnl_lock(), and result refcount is unchanged.
962  */
963 
964 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
965 				      unsigned short mask)
966 {
967 	struct net_device *dev, *ret;
968 
969 	ASSERT_RTNL();
970 
971 	ret = NULL;
972 	for_each_netdev(net, dev) {
973 		if (((dev->flags ^ if_flags) & mask) == 0) {
974 			ret = dev;
975 			break;
976 		}
977 	}
978 	return ret;
979 }
980 EXPORT_SYMBOL(__dev_get_by_flags);
981 
982 /**
983  *	dev_valid_name - check if name is okay for network device
984  *	@name: name string
985  *
986  *	Network device names need to be valid file names to
987  *	allow sysfs to work.  We also disallow any kind of
988  *	whitespace.
989  */
990 bool dev_valid_name(const char *name)
991 {
992 	if (*name == '\0')
993 		return false;
994 	if (strlen(name) >= IFNAMSIZ)
995 		return false;
996 	if (!strcmp(name, ".") || !strcmp(name, ".."))
997 		return false;
998 
999 	while (*name) {
1000 		if (*name == '/' || *name == ':' || isspace(*name))
1001 			return false;
1002 		name++;
1003 	}
1004 	return true;
1005 }
1006 EXPORT_SYMBOL(dev_valid_name);
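
/*
 * Examples of the rules above:
 *	dev_valid_name("eth0")   -> true
 *	dev_valid_name("")       -> false (empty)
 *	dev_valid_name(".")      -> false (reserved)
 *	dev_valid_name("a/b")    -> false ('/' not allowed)
 *	dev_valid_name("my if")  -> false (whitespace not allowed)
 *	names of IFNAMSIZ characters or more -> false (too long)
 */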
1007 
1008 /**
1009  *	__dev_alloc_name - allocate a name for a device
1010  *	@net: network namespace to allocate the device name in
1011  *	@name: name format string
1012  *	@buf:  scratch buffer and result name string
1013  *
1014  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1015  *	id. It scans the list of devices to build up a free map, then chooses
1016  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1017  *	while allocating the name and adding the device in order to avoid
1018  *	duplicates.
1019  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1020  *	Returns the number of the unit assigned or a negative errno code.
1021  */
1022 
1023 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1024 {
1025 	int i = 0;
1026 	const char *p;
1027 	const int max_netdevices = 8*PAGE_SIZE;
1028 	unsigned long *inuse;
1029 	struct net_device *d;
1030 
1031 	p = strnchr(name, IFNAMSIZ-1, '%');
1032 	if (p) {
1033 		/*
1034 		 * Verify the string as this thing may have come from
1035 		 * the user.  There must be either one "%d" and no other "%"
1036 		 * characters.
1037 		 */
1038 		if (p[1] != 'd' || strchr(p + 2, '%'))
1039 			return -EINVAL;
1040 
1041 		/* Use one page as a bit array of possible slots */
1042 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1043 		if (!inuse)
1044 			return -ENOMEM;
1045 
1046 		for_each_netdev(net, d) {
1047 			if (!sscanf(d->name, name, &i))
1048 				continue;
1049 			if (i < 0 || i >= max_netdevices)
1050 				continue;
1051 
1052 			/*  avoid cases where sscanf is not an exact inverse of printf */
1053 			snprintf(buf, IFNAMSIZ, name, i);
1054 			if (!strncmp(buf, d->name, IFNAMSIZ))
1055 				set_bit(i, inuse);
1056 		}
1057 
1058 		i = find_first_zero_bit(inuse, max_netdevices);
1059 		free_page((unsigned long) inuse);
1060 	}
1061 
1062 	if (buf != name)
1063 		snprintf(buf, IFNAMSIZ, name, i);
1064 	if (!__dev_get_by_name(net, buf))
1065 		return i;
1066 
1067 	/* It is possible to run out of possible slots
1068 	 * when the name is long and there isn't enough space left
1069 	 * for the digits, or if all bits are used.
1070 	 */
1071 	return -ENFILE;
1072 }
1073 
1074 /**
1075  *	dev_alloc_name - allocate a name for a device
1076  *	@dev: device
1077  *	@name: name format string
1078  *
1079  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1080  *	id. It scans the list of devices to build up a free map, then chooses
1081  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1082  *	while allocating the name and adding the device in order to avoid
1083  *	duplicates.
1084  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1085  *	Returns the number of the unit assigned or a negative errno code.
1086  */
1087 
1088 int dev_alloc_name(struct net_device *dev, const char *name)
1089 {
1090 	char buf[IFNAMSIZ];
1091 	struct net *net;
1092 	int ret;
1093 
1094 	BUG_ON(!dev_net(dev));
1095 	net = dev_net(dev);
1096 	ret = __dev_alloc_name(net, name, buf);
1097 	if (ret >= 0)
1098 		strlcpy(dev->name, buf, IFNAMSIZ);
1099 	return ret;
1100 }
1101 EXPORT_SYMBOL(dev_alloc_name);
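
/*
 * Example (illustrative sketch only): a virtual device driver picking the
 * next free "tap%d" name for a freshly allocated net_device before
 * registering it.  The surrounding setup code is hypothetical.
 */
#if 0	/* sample usage, not compiled */
	int err = dev_alloc_name(dev, "tap%d");

	if (err < 0)
		return err;	/* no free unit, or bad format string */
	/* dev->name is now e.g. "tap0"; continue with register_netdevice() */
#endif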
1102 
1103 static int dev_alloc_name_ns(struct net *net,
1104 			     struct net_device *dev,
1105 			     const char *name)
1106 {
1107 	char buf[IFNAMSIZ];
1108 	int ret;
1109 
1110 	ret = __dev_alloc_name(net, name, buf);
1111 	if (ret >= 0)
1112 		strlcpy(dev->name, buf, IFNAMSIZ);
1113 	return ret;
1114 }
1115 
1116 static int dev_get_valid_name(struct net *net,
1117 			      struct net_device *dev,
1118 			      const char *name)
1119 {
1120 	BUG_ON(!net);
1121 
1122 	if (!dev_valid_name(name))
1123 		return -EINVAL;
1124 
1125 	if (strchr(name, '%'))
1126 		return dev_alloc_name_ns(net, dev, name);
1127 	else if (__dev_get_by_name(net, name))
1128 		return -EEXIST;
1129 	else if (dev->name != name)
1130 		strlcpy(dev->name, name, IFNAMSIZ);
1131 
1132 	return 0;
1133 }
1134 
1135 /**
1136  *	dev_change_name - change name of a device
1137  *	@dev: device
1138  *	@newname: name (or format string) must be at least IFNAMSIZ
1139  *
1140  *	Change the name of a device. Format strings such as "eth%d"
1141  *	can be passed for wildcarding.
1142  */
1143 int dev_change_name(struct net_device *dev, const char *newname)
1144 {
1145 	unsigned char old_assign_type;
1146 	char oldname[IFNAMSIZ];
1147 	int err = 0;
1148 	int ret;
1149 	struct net *net;
1150 
1151 	ASSERT_RTNL();
1152 	BUG_ON(!dev_net(dev));
1153 
1154 	net = dev_net(dev);
1155 	if (dev->flags & IFF_UP)
1156 		return -EBUSY;
1157 
1158 	write_seqcount_begin(&devnet_rename_seq);
1159 
1160 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1161 		write_seqcount_end(&devnet_rename_seq);
1162 		return 0;
1163 	}
1164 
1165 	memcpy(oldname, dev->name, IFNAMSIZ);
1166 
1167 	err = dev_get_valid_name(net, dev, newname);
1168 	if (err < 0) {
1169 		write_seqcount_end(&devnet_rename_seq);
1170 		return err;
1171 	}
1172 
1173 	if (oldname[0] && !strchr(oldname, '%'))
1174 		netdev_info(dev, "renamed from %s\n", oldname);
1175 
1176 	old_assign_type = dev->name_assign_type;
1177 	dev->name_assign_type = NET_NAME_RENAMED;
1178 
1179 rollback:
1180 	ret = device_rename(&dev->dev, dev->name);
1181 	if (ret) {
1182 		memcpy(dev->name, oldname, IFNAMSIZ);
1183 		dev->name_assign_type = old_assign_type;
1184 		write_seqcount_end(&devnet_rename_seq);
1185 		return ret;
1186 	}
1187 
1188 	write_seqcount_end(&devnet_rename_seq);
1189 
1190 	netdev_adjacent_rename_links(dev, oldname);
1191 
1192 	write_lock_bh(&dev_base_lock);
1193 	hlist_del_rcu(&dev->name_hlist);
1194 	write_unlock_bh(&dev_base_lock);
1195 
1196 	synchronize_rcu();
1197 
1198 	write_lock_bh(&dev_base_lock);
1199 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1200 	write_unlock_bh(&dev_base_lock);
1201 
1202 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1203 	ret = notifier_to_errno(ret);
1204 
1205 	if (ret) {
1206 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1207 		if (err >= 0) {
1208 			err = ret;
1209 			write_seqcount_begin(&devnet_rename_seq);
1210 			memcpy(dev->name, oldname, IFNAMSIZ);
1211 			memcpy(oldname, newname, IFNAMSIZ);
1212 			dev->name_assign_type = old_assign_type;
1213 			old_assign_type = NET_NAME_RENAMED;
1214 			goto rollback;
1215 		} else {
1216 			pr_err("%s: name change rollback failed: %d\n",
1217 			       dev->name, ret);
1218 		}
1219 	}
1220 
1221 	return err;
1222 }
1223 
1224 /**
1225  *	dev_set_alias - change ifalias of a device
1226  *	@dev: device
1227  *	@alias: name up to IFALIASZ
1228  *	@len: limit of bytes to copy from info
1229  *
1230  *	Set the ifalias for a device.
1231  */
1232 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1233 {
1234 	char *new_ifalias;
1235 
1236 	ASSERT_RTNL();
1237 
1238 	if (len >= IFALIASZ)
1239 		return -EINVAL;
1240 
1241 	if (!len) {
1242 		kfree(dev->ifalias);
1243 		dev->ifalias = NULL;
1244 		return 0;
1245 	}
1246 
1247 	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1248 	if (!new_ifalias)
1249 		return -ENOMEM;
1250 	dev->ifalias = new_ifalias;
1251 
1252 	strlcpy(dev->ifalias, alias, len+1);
1253 	return len;
1254 }
1255 
1256 
1257 /**
1258  *	netdev_features_change - device changes features
1259  *	@dev: device to cause notification
1260  *
1261  *	Called to indicate a device has changed features.
1262  */
1263 void netdev_features_change(struct net_device *dev)
1264 {
1265 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1266 }
1267 EXPORT_SYMBOL(netdev_features_change);
1268 
1269 /**
1270  *	netdev_state_change - device changes state
1271  *	@dev: device to cause notification
1272  *
1273  *	Called to indicate a device has changed state. This function calls
1274  *	the notifier chains for netdev_chain and sends a NEWLINK message
1275  *	to the routing socket.
1276  */
1277 void netdev_state_change(struct net_device *dev)
1278 {
1279 	if (dev->flags & IFF_UP) {
1280 		struct netdev_notifier_change_info change_info;
1281 
1282 		change_info.flags_changed = 0;
1283 		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1284 					      &change_info.info);
1285 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1286 	}
1287 }
1288 EXPORT_SYMBOL(netdev_state_change);
1289 
1290 /**
1291  * 	netdev_notify_peers - notify network peers about existence of @dev
1292  * 	@dev: network device
1293  *
1294  * Generate traffic such that interested network peers are aware of
1295  * @dev, such as by generating a gratuitous ARP. This may be used when
1296  * a device wants to inform the rest of the network about some sort of
1297  * reconfiguration such as a failover event or virtual machine
1298  * migration.
1299  */
1300 void netdev_notify_peers(struct net_device *dev)
1301 {
1302 	rtnl_lock();
1303 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1304 	rtnl_unlock();
1305 }
1306 EXPORT_SYMBOL(netdev_notify_peers);
1307 
1308 static int __dev_open(struct net_device *dev)
1309 {
1310 	const struct net_device_ops *ops = dev->netdev_ops;
1311 	int ret;
1312 
1313 	ASSERT_RTNL();
1314 
1315 	if (!netif_device_present(dev))
1316 		return -ENODEV;
1317 
1318 	/* Block netpoll from trying to do any rx path servicing.
1319 	 * If we don't do this, there is a chance that ndo_poll_controller
1320 	 * or ndo_poll may be running while we open the device
1321 	 */
1322 	netpoll_poll_disable(dev);
1323 
1324 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1325 	ret = notifier_to_errno(ret);
1326 	if (ret)
1327 		return ret;
1328 
1329 	set_bit(__LINK_STATE_START, &dev->state);
1330 
1331 	if (ops->ndo_validate_addr)
1332 		ret = ops->ndo_validate_addr(dev);
1333 
1334 	if (!ret && ops->ndo_open)
1335 		ret = ops->ndo_open(dev);
1336 
1337 	netpoll_poll_enable(dev);
1338 
1339 	if (ret)
1340 		clear_bit(__LINK_STATE_START, &dev->state);
1341 	else {
1342 		dev->flags |= IFF_UP;
1343 		dev_set_rx_mode(dev);
1344 		dev_activate(dev);
1345 		add_device_randomness(dev->dev_addr, dev->addr_len);
1346 	}
1347 
1348 	return ret;
1349 }
1350 
1351 /**
1352  *	dev_open	- prepare an interface for use.
1353  *	@dev:	device to open
1354  *
1355  *	Takes a device from down to up state. The device's private open
1356  *	function is invoked and then the multicast lists are loaded. Finally
1357  *	the device is moved into the up state and a %NETDEV_UP message is
1358  *	sent to the netdev notifier chain.
1359  *
1360  *	Calling this function on an active interface is a nop. On a failure
1361  *	a negative errno code is returned.
1362  */
1363 int dev_open(struct net_device *dev)
1364 {
1365 	int ret;
1366 
1367 	if (dev->flags & IFF_UP)
1368 		return 0;
1369 
1370 	ret = __dev_open(dev);
1371 	if (ret < 0)
1372 		return ret;
1373 
1374 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1375 	call_netdevice_notifiers(NETDEV_UP, dev);
1376 
1377 	return ret;
1378 }
1379 EXPORT_SYMBOL(dev_open);
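
/*
 * Example (illustrative sketch only): bringing a device up from kernel
 * code.  dev_open() must be called with the RTNL lock held.
 */
#if 0	/* sample usage, not compiled */
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* 0 on success or if already up */
	rtnl_unlock();
#endif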
1380 
1381 static int __dev_close_many(struct list_head *head)
1382 {
1383 	struct net_device *dev;
1384 
1385 	ASSERT_RTNL();
1386 	might_sleep();
1387 
1388 	list_for_each_entry(dev, head, close_list) {
1389 		/* Temporarily disable netpoll until the interface is down */
1390 		netpoll_poll_disable(dev);
1391 
1392 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1393 
1394 		clear_bit(__LINK_STATE_START, &dev->state);
1395 
1396 		/* Synchronize to the scheduled poll. We cannot touch the poll list; it
1397 		 * can even be on a different CPU. So just clear netif_running().
1398 		 *
1399 		 * dev->stop() will invoke napi_disable() on all of it's
1400 		 * napi_struct instances on this device.
1401 		 */
1402 		smp_mb__after_atomic(); /* Commit netif_running(). */
1403 	}
1404 
1405 	dev_deactivate_many(head);
1406 
1407 	list_for_each_entry(dev, head, close_list) {
1408 		const struct net_device_ops *ops = dev->netdev_ops;
1409 
1410 		/*
1411 		 *	Call the device specific close. This cannot fail.
1412 		 *	It is only called if the device is UP.
1413 		 *
1414 		 *	We allow it to be called even after a DETACH hot-plug
1415 		 *	event.
1416 		 */
1417 		if (ops->ndo_stop)
1418 			ops->ndo_stop(dev);
1419 
1420 		dev->flags &= ~IFF_UP;
1421 		netpoll_poll_enable(dev);
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 static int __dev_close(struct net_device *dev)
1428 {
1429 	int retval;
1430 	LIST_HEAD(single);
1431 
1432 	list_add(&dev->close_list, &single);
1433 	retval = __dev_close_many(&single);
1434 	list_del(&single);
1435 
1436 	return retval;
1437 }
1438 
1439 int dev_close_many(struct list_head *head, bool unlink)
1440 {
1441 	struct net_device *dev, *tmp;
1442 
1443 	/* Remove the devices that don't need to be closed */
1444 	list_for_each_entry_safe(dev, tmp, head, close_list)
1445 		if (!(dev->flags & IFF_UP))
1446 			list_del_init(&dev->close_list);
1447 
1448 	__dev_close_many(head);
1449 
1450 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1451 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1452 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1453 		if (unlink)
1454 			list_del_init(&dev->close_list);
1455 	}
1456 
1457 	return 0;
1458 }
1459 EXPORT_SYMBOL(dev_close_many);
1460 
1461 /**
1462  *	dev_close - shutdown an interface.
1463  *	@dev: device to shutdown
1464  *
1465  *	This function moves an active device into down state. A
1466  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1467  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1468  *	chain.
1469  */
1470 int dev_close(struct net_device *dev)
1471 {
1472 	if (dev->flags & IFF_UP) {
1473 		LIST_HEAD(single);
1474 
1475 		list_add(&dev->close_list, &single);
1476 		dev_close_many(&single, true);
1477 		list_del(&single);
1478 	}
1479 	return 0;
1480 }
1481 EXPORT_SYMBOL(dev_close);
1482 
1483 
1484 /**
1485  *	dev_disable_lro - disable Large Receive Offload on a device
1486  *	@dev: device
1487  *
1488  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1489  *	called under RTNL.  This is needed if received packets may be
1490  *	forwarded to another interface.
1491  */
1492 void dev_disable_lro(struct net_device *dev)
1493 {
1494 	struct net_device *lower_dev;
1495 	struct list_head *iter;
1496 
1497 	dev->wanted_features &= ~NETIF_F_LRO;
1498 	netdev_update_features(dev);
1499 
1500 	if (unlikely(dev->features & NETIF_F_LRO))
1501 		netdev_WARN(dev, "failed to disable LRO!\n");
1502 
1503 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1504 		dev_disable_lro(lower_dev);
1505 }
1506 EXPORT_SYMBOL(dev_disable_lro);
1507 
1508 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1509 				   struct net_device *dev)
1510 {
1511 	struct netdev_notifier_info info;
1512 
1513 	netdev_notifier_info_init(&info, dev);
1514 	return nb->notifier_call(nb, val, &info);
1515 }
1516 
1517 static int dev_boot_phase = 1;
1518 
1519 /**
1520  *	register_netdevice_notifier - register a network notifier block
1521  *	@nb: notifier
1522  *
1523  *	Register a notifier to be called when network device events occur.
1524  *	The notifier passed is linked into the kernel structures and must
1525  *	not be reused until it has been unregistered. A negative errno code
1526  *	is returned on a failure.
1527  *
1528  * 	When registered, all registration and up events are replayed
1529  *	to the new notifier to allow the device to have a race-free
1530  *	view of the network device list.
1531  */
1532 
1533 int register_netdevice_notifier(struct notifier_block *nb)
1534 {
1535 	struct net_device *dev;
1536 	struct net_device *last;
1537 	struct net *net;
1538 	int err;
1539 
1540 	rtnl_lock();
1541 	err = raw_notifier_chain_register(&netdev_chain, nb);
1542 	if (err)
1543 		goto unlock;
1544 	if (dev_boot_phase)
1545 		goto unlock;
1546 	for_each_net(net) {
1547 		for_each_netdev(net, dev) {
1548 			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1549 			err = notifier_to_errno(err);
1550 			if (err)
1551 				goto rollback;
1552 
1553 			if (!(dev->flags & IFF_UP))
1554 				continue;
1555 
1556 			call_netdevice_notifier(nb, NETDEV_UP, dev);
1557 		}
1558 	}
1559 
1560 unlock:
1561 	rtnl_unlock();
1562 	return err;
1563 
1564 rollback:
1565 	last = dev;
1566 	for_each_net(net) {
1567 		for_each_netdev(net, dev) {
1568 			if (dev == last)
1569 				goto outroll;
1570 
1571 			if (dev->flags & IFF_UP) {
1572 				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1573 							dev);
1574 				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1575 			}
1576 			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1577 		}
1578 	}
1579 
1580 outroll:
1581 	raw_notifier_chain_unregister(&netdev_chain, nb);
1582 	goto unlock;
1583 }
1584 EXPORT_SYMBOL(register_netdevice_notifier);
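
/*
 * Example (illustrative sketch only): a notifier that logs UP/DOWN events.
 * The sample_* names are hypothetical.  As noted above, REGISTER and UP
 * events for devices that already exist are replayed when the notifier is
 * registered.
 */
#if 0	/* sample usage, not compiled */
static int sample_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s is down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block sample_netdev_nb = {
	.notifier_call = sample_netdev_event,
};

static int __init sample_nb_init(void)
{
	return register_netdevice_notifier(&sample_netdev_nb);
}
#endif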
1585 
1586 /**
1587  *	unregister_netdevice_notifier - unregister a network notifier block
1588  *	@nb: notifier
1589  *
1590  *	Unregister a notifier previously registered by
1591  *	register_netdevice_notifier(). The notifier is unlinked from the
1592  *	kernel structures and may then be reused. A negative errno code
1593  *	is returned on a failure.
1594  *
1595  * 	After unregistering, unregister and down device events are synthesized
1596  *	for all devices on the device list and delivered to the removed
1597  *	notifier, removing the need for special case cleanup code.
1598  */
1599 
1600 int unregister_netdevice_notifier(struct notifier_block *nb)
1601 {
1602 	struct net_device *dev;
1603 	struct net *net;
1604 	int err;
1605 
1606 	rtnl_lock();
1607 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1608 	if (err)
1609 		goto unlock;
1610 
1611 	for_each_net(net) {
1612 		for_each_netdev(net, dev) {
1613 			if (dev->flags & IFF_UP) {
1614 				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1615 							dev);
1616 				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1617 			}
1618 			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1619 		}
1620 	}
1621 unlock:
1622 	rtnl_unlock();
1623 	return err;
1624 }
1625 EXPORT_SYMBOL(unregister_netdevice_notifier);
1626 
1627 /**
1628  *	call_netdevice_notifiers_info - call all network notifier blocks
1629  *	@val: value passed unmodified to notifier function
1630  *	@dev: net_device pointer passed unmodified to notifier function
1631  *	@info: notifier information data
1632  *
1633  *	Call all network notifier blocks.  Parameters and return value
1634  *	are as for raw_notifier_call_chain().
1635  */
1636 
1637 static int call_netdevice_notifiers_info(unsigned long val,
1638 					 struct net_device *dev,
1639 					 struct netdev_notifier_info *info)
1640 {
1641 	ASSERT_RTNL();
1642 	netdev_notifier_info_init(info, dev);
1643 	return raw_notifier_call_chain(&netdev_chain, val, info);
1644 }
1645 
1646 /**
1647  *	call_netdevice_notifiers - call all network notifier blocks
1648  *      @val: value passed unmodified to notifier function
1649  *      @dev: net_device pointer passed unmodified to notifier function
1650  *
1651  *	Call all network notifier blocks.  Parameters and return value
1652  *	are as for raw_notifier_call_chain().
1653  */
1654 
1655 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1656 {
1657 	struct netdev_notifier_info info;
1658 
1659 	return call_netdevice_notifiers_info(val, dev, &info);
1660 }
1661 EXPORT_SYMBOL(call_netdevice_notifiers);
1662 
1663 #ifdef CONFIG_NET_INGRESS
1664 static struct static_key ingress_needed __read_mostly;
1665 
1666 void net_inc_ingress_queue(void)
1667 {
1668 	static_key_slow_inc(&ingress_needed);
1669 }
1670 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1671 
1672 void net_dec_ingress_queue(void)
1673 {
1674 	static_key_slow_dec(&ingress_needed);
1675 }
1676 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1677 #endif
1678 
1679 #ifdef CONFIG_NET_EGRESS
1680 static struct static_key egress_needed __read_mostly;
1681 
1682 void net_inc_egress_queue(void)
1683 {
1684 	static_key_slow_inc(&egress_needed);
1685 }
1686 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1687 
1688 void net_dec_egress_queue(void)
1689 {
1690 	static_key_slow_dec(&egress_needed);
1691 }
1692 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1693 #endif
1694 
1695 static struct static_key netstamp_needed __read_mostly;
1696 #ifdef HAVE_JUMP_LABEL
1697 /* We are not allowed to call static_key_slow_dec() from irq context.
1698  * If net_disable_timestamp() is called from irq context, defer the
1699  * static_key_slow_dec() calls.
1700  */
1701 static atomic_t netstamp_needed_deferred;
1702 #endif
1703 
1704 void net_enable_timestamp(void)
1705 {
1706 #ifdef HAVE_JUMP_LABEL
1707 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1708 
1709 	if (deferred) {
1710 		while (--deferred)
1711 			static_key_slow_dec(&netstamp_needed);
1712 		return;
1713 	}
1714 #endif
1715 	static_key_slow_inc(&netstamp_needed);
1716 }
1717 EXPORT_SYMBOL(net_enable_timestamp);
1718 
1719 void net_disable_timestamp(void)
1720 {
1721 #ifdef HAVE_JUMP_LABEL
1722 	if (in_interrupt()) {
1723 		atomic_inc(&netstamp_needed_deferred);
1724 		return;
1725 	}
1726 #endif
1727 	static_key_slow_dec(&netstamp_needed);
1728 }
1729 EXPORT_SYMBOL(net_disable_timestamp);
1730 
1731 static inline void net_timestamp_set(struct sk_buff *skb)
1732 {
1733 	skb->tstamp.tv64 = 0;
1734 	if (static_key_false(&netstamp_needed))
1735 		__net_timestamp(skb);
1736 }
1737 
1738 #define net_timestamp_check(COND, SKB)			\
1739 	if (static_key_false(&netstamp_needed)) {		\
1740 		if ((COND) && !(SKB)->tstamp.tv64)	\
1741 			__net_timestamp(SKB);		\
1742 	}						\
1743 
1744 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1745 {
1746 	unsigned int len;
1747 
1748 	if (!(dev->flags & IFF_UP))
1749 		return false;
1750 
1751 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1752 	if (skb->len <= len)
1753 		return true;
1754 
1755 	/* if TSO is enabled, we don't care about the length as the packet
1756 	 * could be forwarded without having been segmented first
1757 	 */
1758 	if (skb_is_gso(skb))
1759 		return true;
1760 
1761 	return false;
1762 }
1763 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1764 
1765 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1766 {
1767 	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1768 	    unlikely(!is_skb_forwardable(dev, skb))) {
1769 		atomic_long_inc(&dev->rx_dropped);
1770 		kfree_skb(skb);
1771 		return NET_RX_DROP;
1772 	}
1773 
1774 	skb_scrub_packet(skb, true);
1775 	skb->priority = 0;
1776 	skb->protocol = eth_type_trans(skb, dev);
1777 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1778 
1779 	return 0;
1780 }
1781 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1782 
1783 /**
1784  * dev_forward_skb - loopback an skb to another netif
1785  *
1786  * @dev: destination network device
1787  * @skb: buffer to forward
1788  *
1789  * return values:
1790  *	NET_RX_SUCCESS	(no congestion)
1791  *	NET_RX_DROP     (packet was dropped, but freed)
1792  *
1793  * dev_forward_skb can be used for injecting an skb from the
1794  * start_xmit function of one device into the receive queue
1795  * of another device.
1796  *
1797  * The receiving device may be in another namespace, so
1798  * we have to clear all information in the skb that could
1799  * impact namespace isolation.
1800  */
1801 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1802 {
1803 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1804 }
1805 EXPORT_SYMBOL_GPL(dev_forward_skb);
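
/*
 * Example (illustrative sketch only): a veth-like driver handing frames
 * from one device's start_xmit straight to its peer's receive path.
 * sample_get_peer() is a hypothetical helper returning the paired device.
 */
#if 0	/* sample usage, not compiled */
static netdev_tx_t sample_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = sample_get_peer(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;	/* skb already freed on drop */

	return NETDEV_TX_OK;
}
#endif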
1806 
1807 static inline int deliver_skb(struct sk_buff *skb,
1808 			      struct packet_type *pt_prev,
1809 			      struct net_device *orig_dev)
1810 {
1811 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1812 		return -ENOMEM;
1813 	atomic_inc(&skb->users);
1814 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1815 }
1816 
1817 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1818 					  struct packet_type **pt,
1819 					  struct net_device *orig_dev,
1820 					  __be16 type,
1821 					  struct list_head *ptype_list)
1822 {
1823 	struct packet_type *ptype, *pt_prev = *pt;
1824 
1825 	list_for_each_entry_rcu(ptype, ptype_list, list) {
1826 		if (ptype->type != type)
1827 			continue;
1828 		if (pt_prev)
1829 			deliver_skb(skb, pt_prev, orig_dev);
1830 		pt_prev = ptype;
1831 	}
1832 	*pt = pt_prev;
1833 }
1834 
1835 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1836 {
1837 	if (!ptype->af_packet_priv || !skb->sk)
1838 		return false;
1839 
1840 	if (ptype->id_match)
1841 		return ptype->id_match(ptype, skb->sk);
1842 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1843 		return true;
1844 
1845 	return false;
1846 }
1847 
1848 /*
1849  *	Support routine. Sends outgoing frames to any network
1850  *	taps currently in use.
1851  */
1852 
1853 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1854 {
1855 	struct packet_type *ptype;
1856 	struct sk_buff *skb2 = NULL;
1857 	struct packet_type *pt_prev = NULL;
1858 	struct list_head *ptype_list = &ptype_all;
1859 
1860 	rcu_read_lock();
1861 again:
1862 	list_for_each_entry_rcu(ptype, ptype_list, list) {
1863 		/* Never send packets back to the socket
1864 		 * they originated from - MvS (miquels@drinkel.ow.org)
1865 		 */
1866 		if (skb_loop_sk(ptype, skb))
1867 			continue;
1868 
1869 		if (pt_prev) {
1870 			deliver_skb(skb2, pt_prev, skb->dev);
1871 			pt_prev = ptype;
1872 			continue;
1873 		}
1874 
1875 		/* need to clone skb, done only once */
1876 		skb2 = skb_clone(skb, GFP_ATOMIC);
1877 		if (!skb2)
1878 			goto out_unlock;
1879 
1880 		net_timestamp_set(skb2);
1881 
1882 		/* skb->nh should be correctly
1883 		 * set by sender, so that the second statement is
1884 		 * just protection against buggy protocols.
1885 		 */
1886 		skb_reset_mac_header(skb2);
1887 
1888 		if (skb_network_header(skb2) < skb2->data ||
1889 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1890 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1891 					     ntohs(skb2->protocol),
1892 					     dev->name);
1893 			skb_reset_network_header(skb2);
1894 		}
1895 
1896 		skb2->transport_header = skb2->network_header;
1897 		skb2->pkt_type = PACKET_OUTGOING;
1898 		pt_prev = ptype;
1899 	}
1900 
1901 	if (ptype_list == &ptype_all) {
1902 		ptype_list = &dev->ptype_all;
1903 		goto again;
1904 	}
1905 out_unlock:
1906 	if (pt_prev)
1907 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1908 	rcu_read_unlock();
1909 }
1910 
1911 /**
1912  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1913  * @dev: Network device
1914  * @txq: number of queues available
1915  *
1916  * If real_num_tx_queues is changed the tc mappings may no longer be
1917  * valid. To resolve this, verify that the tc mapping remains valid and,
1918  * if not, zero the mapping. With no priorities mapping to this
1919  * offset/count pair it will no longer be used. In the worst case, if TC0
1920  * is invalid, nothing can be done, so disable priority mappings. It is
1921  * expected that drivers will fix this mapping if they can before
1922  * calling netif_set_real_num_tx_queues.
1923  */
1924 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1925 {
1926 	int i;
1927 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1928 
1929 	/* If TC0 is invalidated disable TC mapping */
1930 	if (tc->offset + tc->count > txq) {
1931 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1932 		dev->num_tc = 0;
1933 		return;
1934 	}
1935 
1936 	/* Invalidated prio to tc mappings set to TC0 */
1937 	for (i = 1; i < TC_BITMASK + 1; i++) {
1938 		int q = netdev_get_prio_tc_map(dev, i);
1939 
1940 		tc = &dev->tc_to_txq[q];
1941 		if (tc->offset + tc->count > txq) {
1942 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1943 				i, q);
1944 			netdev_set_prio_tc_map(dev, i, 0);
1945 		}
1946 	}
1947 }
1948 
1949 #ifdef CONFIG_XPS
1950 static DEFINE_MUTEX(xps_map_mutex);
1951 #define xmap_dereference(P)		\
1952 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1953 
1954 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1955 					int cpu, u16 index)
1956 {
1957 	struct xps_map *map = NULL;
1958 	int pos;
1959 
1960 	if (dev_maps)
1961 		map = xmap_dereference(dev_maps->cpu_map[cpu]);
1962 
1963 	for (pos = 0; map && pos < map->len; pos++) {
1964 		if (map->queues[pos] == index) {
1965 			if (map->len > 1) {
1966 				map->queues[pos] = map->queues[--map->len];
1967 			} else {
1968 				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1969 				kfree_rcu(map, rcu);
1970 				map = NULL;
1971 			}
1972 			break;
1973 		}
1974 	}
1975 
1976 	return map;
1977 }
1978 
1979 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1980 {
1981 	struct xps_dev_maps *dev_maps;
1982 	int cpu, i;
1983 	bool active = false;
1984 
1985 	mutex_lock(&xps_map_mutex);
1986 	dev_maps = xmap_dereference(dev->xps_maps);
1987 
1988 	if (!dev_maps)
1989 		goto out_no_maps;
1990 
1991 	for_each_possible_cpu(cpu) {
1992 		for (i = index; i < dev->num_tx_queues; i++) {
1993 			if (!remove_xps_queue(dev_maps, cpu, i))
1994 				break;
1995 		}
1996 		if (i == dev->num_tx_queues)
1997 			active = true;
1998 	}
1999 
2000 	if (!active) {
2001 		RCU_INIT_POINTER(dev->xps_maps, NULL);
2002 		kfree_rcu(dev_maps, rcu);
2003 	}
2004 
2005 	for (i = index; i < dev->num_tx_queues; i++)
2006 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2007 					     NUMA_NO_NODE);
2008 
2009 out_no_maps:
2010 	mutex_unlock(&xps_map_mutex);
2011 }
2012 
2013 static struct xps_map *expand_xps_map(struct xps_map *map,
2014 				      int cpu, u16 index)
2015 {
2016 	struct xps_map *new_map;
2017 	int alloc_len = XPS_MIN_MAP_ALLOC;
2018 	int i, pos;
2019 
2020 	for (pos = 0; map && pos < map->len; pos++) {
2021 		if (map->queues[pos] != index)
2022 			continue;
2023 		return map;
2024 	}
2025 
2026 	/* Need to add queue to this CPU's existing map */
2027 	if (map) {
2028 		if (pos < map->alloc_len)
2029 			return map;
2030 
2031 		alloc_len = map->alloc_len * 2;
2032 	}
2033 
2034 	/* Need to allocate a new map to store the queue for this CPU */
2035 	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2036 			       cpu_to_node(cpu));
2037 	if (!new_map)
2038 		return NULL;
2039 
2040 	for (i = 0; i < pos; i++)
2041 		new_map->queues[i] = map->queues[i];
2042 	new_map->alloc_len = alloc_len;
2043 	new_map->len = pos;
2044 
2045 	return new_map;
2046 }
2047 
2048 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2049 			u16 index)
2050 {
2051 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2052 	struct xps_map *map, *new_map;
2053 	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2054 	int cpu, numa_node_id = -2;
2055 	bool active = false;
2056 
2057 	mutex_lock(&xps_map_mutex);
2058 
2059 	dev_maps = xmap_dereference(dev->xps_maps);
2060 
2061 	/* allocate memory for queue storage */
2062 	for_each_online_cpu(cpu) {
2063 		if (!cpumask_test_cpu(cpu, mask))
2064 			continue;
2065 
2066 		if (!new_dev_maps)
2067 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2068 		if (!new_dev_maps) {
2069 			mutex_unlock(&xps_map_mutex);
2070 			return -ENOMEM;
2071 		}
2072 
2073 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2074 				 NULL;
2075 
2076 		map = expand_xps_map(map, cpu, index);
2077 		if (!map)
2078 			goto error;
2079 
2080 		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2081 	}
2082 
2083 	if (!new_dev_maps)
2084 		goto out_no_new_maps;
2085 
2086 	for_each_possible_cpu(cpu) {
2087 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2088 			/* add queue to CPU maps */
2089 			int pos = 0;
2090 
2091 			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2092 			while ((pos < map->len) && (map->queues[pos] != index))
2093 				pos++;
2094 
2095 			if (pos == map->len)
2096 				map->queues[map->len++] = index;
2097 #ifdef CONFIG_NUMA
2098 			if (numa_node_id == -2)
2099 				numa_node_id = cpu_to_node(cpu);
2100 			else if (numa_node_id != cpu_to_node(cpu))
2101 				numa_node_id = -1;
2102 #endif
2103 		} else if (dev_maps) {
2104 			/* fill in the new device map from the old device map */
2105 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
2106 			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2107 		}
2108 
2109 	}
2110 
2111 	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2112 
2113 	/* Cleanup old maps */
2114 	if (dev_maps) {
2115 		for_each_possible_cpu(cpu) {
2116 			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2117 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
2118 			if (map && map != new_map)
2119 				kfree_rcu(map, rcu);
2120 		}
2121 
2122 		kfree_rcu(dev_maps, rcu);
2123 	}
2124 
2125 	dev_maps = new_dev_maps;
2126 	active = true;
2127 
2128 out_no_new_maps:
2129 	/* update Tx queue numa node */
2130 	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2131 				     (numa_node_id >= 0) ? numa_node_id :
2132 				     NUMA_NO_NODE);
2133 
2134 	if (!dev_maps)
2135 		goto out_no_maps;
2136 
2137 	/* removes queue from unused CPUs */
2138 	for_each_possible_cpu(cpu) {
2139 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2140 			continue;
2141 
2142 		if (remove_xps_queue(dev_maps, cpu, index))
2143 			active = true;
2144 	}
2145 
2146 	/* free map if not active */
2147 	if (!active) {
2148 		RCU_INIT_POINTER(dev->xps_maps, NULL);
2149 		kfree_rcu(dev_maps, rcu);
2150 	}
2151 
2152 out_no_maps:
2153 	mutex_unlock(&xps_map_mutex);
2154 
2155 	return 0;
2156 error:
2157 	/* remove any maps that we added */
2158 	for_each_possible_cpu(cpu) {
2159 		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2160 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2161 				 NULL;
2162 		if (new_map && new_map != map)
2163 			kfree(new_map);
2164 	}
2165 
2166 	mutex_unlock(&xps_map_mutex);
2167 
2168 	kfree(new_dev_maps);
2169 	return -ENOMEM;
2170 }
2171 EXPORT_SYMBOL(netif_set_xps_queue);
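
/* A minimal usage sketch (illustrative only): a driver could bind every Tx
 * queue to the CPUs of the device's NUMA node. The node value is an
 * assumption made for the example; error handling is omitted.
 */
#if 0
static void example_setup_xps(struct net_device *dev, int node)
{
	unsigned int qid;

	for (qid = 0; qid < dev->real_num_tx_queues; qid++)
		netif_set_xps_queue(dev, cpumask_of_node(node), qid);
}
#endif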
2172 
2173 #endif
2174 /*
2175  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2176  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2177  */
2178 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2179 {
2180 	int rc;
2181 
2182 	if (txq < 1 || txq > dev->num_tx_queues)
2183 		return -EINVAL;
2184 
2185 	if (dev->reg_state == NETREG_REGISTERED ||
2186 	    dev->reg_state == NETREG_UNREGISTERING) {
2187 		ASSERT_RTNL();
2188 
2189 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2190 						  txq);
2191 		if (rc)
2192 			return rc;
2193 
2194 		if (dev->num_tc)
2195 			netif_setup_tc(dev, txq);
2196 
2197 		if (txq < dev->real_num_tx_queues) {
2198 			qdisc_reset_all_tx_gt(dev, txq);
2199 #ifdef CONFIG_XPS
2200 			netif_reset_xps_queues_gt(dev, txq);
2201 #endif
2202 		}
2203 	}
2204 
2205 	dev->real_num_tx_queues = txq;
2206 	return 0;
2207 }
2208 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
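
/* A minimal usage sketch (illustrative only): shrinking or growing the set
 * of active queues from a driver's reconfiguration path. The rtnl lock is
 * assumed to be held by the caller once the device is registered.
 */
#if 0
static int example_set_queue_count(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, count);
}
#endif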
2209 
2210 #ifdef CONFIG_SYSFS
2211 /**
2212  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2213  *	@dev: Network device
2214  *	@rxq: Actual number of RX queues
2215  *
2216  *	This must be called either with the rtnl_lock held or before
2217  *	registration of the net device.  Returns 0 on success, or a
2218  *	negative error code.  If called before registration, it always
2219  *	succeeds.
2220  */
2221 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2222 {
2223 	int rc;
2224 
2225 	if (rxq < 1 || rxq > dev->num_rx_queues)
2226 		return -EINVAL;
2227 
2228 	if (dev->reg_state == NETREG_REGISTERED) {
2229 		ASSERT_RTNL();
2230 
2231 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2232 						  rxq);
2233 		if (rc)
2234 			return rc;
2235 	}
2236 
2237 	dev->real_num_rx_queues = rxq;
2238 	return 0;
2239 }
2240 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2241 #endif
2242 
2243 /**
2244  * netif_get_num_default_rss_queues - default number of RSS queues
2245  *
2246  * This routine should set an upper limit on the number of RSS queues
2247  * used by default by multiqueue devices.
2248  */
2249 int netif_get_num_default_rss_queues(void)
2250 {
2251 	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2252 }
2253 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2254 
2255 static inline void __netif_reschedule(struct Qdisc *q)
2256 {
2257 	struct softnet_data *sd;
2258 	unsigned long flags;
2259 
2260 	local_irq_save(flags);
2261 	sd = this_cpu_ptr(&softnet_data);
2262 	q->next_sched = NULL;
2263 	*sd->output_queue_tailp = q;
2264 	sd->output_queue_tailp = &q->next_sched;
2265 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2266 	local_irq_restore(flags);
2267 }
2268 
2269 void __netif_schedule(struct Qdisc *q)
2270 {
2271 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2272 		__netif_reschedule(q);
2273 }
2274 EXPORT_SYMBOL(__netif_schedule);
2275 
2276 struct dev_kfree_skb_cb {
2277 	enum skb_free_reason reason;
2278 };
2279 
2280 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2281 {
2282 	return (struct dev_kfree_skb_cb *)skb->cb;
2283 }
2284 
2285 void netif_schedule_queue(struct netdev_queue *txq)
2286 {
2287 	rcu_read_lock();
2288 	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2289 		struct Qdisc *q = rcu_dereference(txq->qdisc);
2290 
2291 		__netif_schedule(q);
2292 	}
2293 	rcu_read_unlock();
2294 }
2295 EXPORT_SYMBOL(netif_schedule_queue);
2296 
2297 /**
2298  *	netif_wake_subqueue - allow sending packets on subqueue
2299  *	@dev: network device
2300  *	@queue_index: sub queue index
2301  *
2302  * Resume individual transmit queue of a device with multiple transmit queues.
2303  */
2304 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2305 {
2306 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2307 
2308 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2309 		struct Qdisc *q;
2310 
2311 		rcu_read_lock();
2312 		q = rcu_dereference(txq->qdisc);
2313 		__netif_schedule(q);
2314 		rcu_read_unlock();
2315 	}
2316 }
2317 EXPORT_SYMBOL(netif_wake_subqueue);
2318 
2319 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2320 {
2321 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2322 		struct Qdisc *q;
2323 
2324 		rcu_read_lock();
2325 		q = rcu_dereference(dev_queue->qdisc);
2326 		__netif_schedule(q);
2327 		rcu_read_unlock();
2328 	}
2329 }
2330 EXPORT_SYMBOL(netif_tx_wake_queue);
2331 
2332 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2333 {
2334 	unsigned long flags;
2335 
2336 	if (likely(atomic_read(&skb->users) == 1)) {
2337 		smp_rmb();
2338 		atomic_set(&skb->users, 0);
2339 	} else if (likely(!atomic_dec_and_test(&skb->users))) {
2340 		return;
2341 	}
2342 	get_kfree_skb_cb(skb)->reason = reason;
2343 	local_irq_save(flags);
2344 	skb->next = __this_cpu_read(softnet_data.completion_queue);
2345 	__this_cpu_write(softnet_data.completion_queue, skb);
2346 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2347 	local_irq_restore(flags);
2348 }
2349 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2350 
2351 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2352 {
2353 	if (in_irq() || irqs_disabled())
2354 		__dev_kfree_skb_irq(skb, reason);
2355 	else
2356 		dev_kfree_skb(skb);
2357 }
2358 EXPORT_SYMBOL(__dev_kfree_skb_any);
2359 
2360 
2361 /**
2362  * netif_device_detach - mark device as removed
2363  * @dev: network device
2364  *
2365  * Mark device as removed from system and therefore no longer available.
2366  */
2367 void netif_device_detach(struct net_device *dev)
2368 {
2369 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2370 	    netif_running(dev)) {
2371 		netif_tx_stop_all_queues(dev);
2372 	}
2373 }
2374 EXPORT_SYMBOL(netif_device_detach);
2375 
2376 /**
2377  * netif_device_attach - mark device as attached
2378  * @dev: network device
2379  *
2380  * Mark device as attached to the system and restart if needed.
2381  */
2382 void netif_device_attach(struct net_device *dev)
2383 {
2384 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2385 	    netif_running(dev)) {
2386 		netif_tx_wake_all_queues(dev);
2387 		__netdev_watchdog_up(dev);
2388 	}
2389 }
2390 EXPORT_SYMBOL(netif_device_attach);
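
/* A minimal usage sketch (illustrative only): drivers typically detach the
 * device before powering the hardware down and re-attach it on resume. The
 * suspend/resume entry points and drvdata layout are assumptions.
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	/* ... power the hardware down ... */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... power the hardware back up ... */
	netif_device_attach(dev);
	return 0;
}
#endif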
2391 
2392 /*
2393  * Returns a Tx hash based on the given packet descriptor and a Tx queue
2394  * count to be used as a distribution range.
2395  */
2396 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2397 		  unsigned int num_tx_queues)
2398 {
2399 	u32 hash;
2400 	u16 qoffset = 0;
2401 	u16 qcount = num_tx_queues;
2402 
2403 	if (skb_rx_queue_recorded(skb)) {
2404 		hash = skb_get_rx_queue(skb);
2405 		while (unlikely(hash >= num_tx_queues))
2406 			hash -= num_tx_queues;
2407 		return hash;
2408 	}
2409 
2410 	if (dev->num_tc) {
2411 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2412 		qoffset = dev->tc_to_txq[tc].offset;
2413 		qcount = dev->tc_to_txq[tc].count;
2414 	}
2415 
2416 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2417 }
2418 EXPORT_SYMBOL(__skb_tx_hash);
2419 
2420 static void skb_warn_bad_offload(const struct sk_buff *skb)
2421 {
2422 	static const netdev_features_t null_features = 0;
2423 	struct net_device *dev = skb->dev;
2424 	const char *name = "";
2425 
2426 	if (!net_ratelimit())
2427 		return;
2428 
2429 	if (dev) {
2430 		if (dev->dev.parent)
2431 			name = dev_driver_string(dev->dev.parent);
2432 		else
2433 			name = netdev_name(dev);
2434 	}
2435 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2436 	     "gso_type=%d ip_summed=%d\n",
2437 	     name, dev ? &dev->features : &null_features,
2438 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
2439 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2440 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
2441 }
2442 
2443 /*
2444  * Invalidate hardware checksum when packet is to be mangled, and
2445  * complete checksum manually on outgoing path.
2446  */
2447 int skb_checksum_help(struct sk_buff *skb)
2448 {
2449 	__wsum csum;
2450 	int ret = 0, offset;
2451 
2452 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2453 		goto out_set_summed;
2454 
2455 	if (unlikely(skb_shinfo(skb)->gso_size)) {
2456 		skb_warn_bad_offload(skb);
2457 		return -EINVAL;
2458 	}
2459 
2460 	/* Before computing a checksum, we should make sure no frag could
2461 	 * be modified by an external entity: the checksum could be wrong.
2462 	 */
2463 	if (skb_has_shared_frag(skb)) {
2464 		ret = __skb_linearize(skb);
2465 		if (ret)
2466 			goto out;
2467 	}
2468 
2469 	offset = skb_checksum_start_offset(skb);
2470 	BUG_ON(offset >= skb_headlen(skb));
2471 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
2472 
2473 	offset += skb->csum_offset;
2474 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2475 
2476 	if (skb_cloned(skb) &&
2477 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2478 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 		if (ret)
2480 			goto out;
2481 	}
2482 
2483 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
2484 out_set_summed:
2485 	skb->ip_summed = CHECKSUM_NONE;
2486 out:
2487 	return ret;
2488 }
2489 EXPORT_SYMBOL(skb_checksum_help);
2490 
2491 /* skb_csum_offload_check - Driver helper function to determine if a device
2492  * with limited checksum offload capabilities is able to offload the checksum
2493  * for a given packet.
2494  *
2495  * Arguments:
2496  *   skb - sk_buff for the packet in question
2497  *   spec - contains the description of what device can offload
2498  *   csum_encapped - returns true if the checksum being offloaded is
2499  *	      encapsulated. That is, it is the checksum for the transport header
2500  *	      in the inner headers.
2501  *   checksum_help - when set, indicates that the helper function should
2502  *	      call skb_checksum_help if offload checks fail
2503  *
2504  * Returns:
2505  *   true: Packet has passed the checksum checks and should be offloadable to
2506  *	   the device (a driver may still need to check for additional
2507  *	   restrictions of its device)
2508  *   false: Checksum is not offloadable. If checksum_help was set then
2509  *	   skb_checksum_help was called to resolve the checksum for non-GSO
2510  *	   packets when the IP protocol is not SCTP
2511  */
2512 bool __skb_csum_offload_chk(struct sk_buff *skb,
2513 			    const struct skb_csum_offl_spec *spec,
2514 			    bool *csum_encapped,
2515 			    bool csum_help)
2516 {
2517 	struct iphdr *iph;
2518 	struct ipv6hdr *ipv6;
2519 	void *nhdr;
2520 	int protocol;
2521 	u8 ip_proto;
2522 
2523 	if (skb->protocol == htons(ETH_P_8021Q) ||
2524 	    skb->protocol == htons(ETH_P_8021AD)) {
2525 		if (!spec->vlan_okay)
2526 			goto need_help;
2527 	}
2528 
2529 	/* We check whether the checksum refers to a transport layer checksum in
2530 	 * the outermost header or an encapsulated transport layer checksum that
2531 	 * corresponds to the inner headers of the skb. If the checksum is for
2532 	 * something else in the packet we need help.
2533 	 */
2534 	if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
2535 		/* Non-encapsulated checksum */
2536 		protocol = eproto_to_ipproto(vlan_get_protocol(skb));
2537 		nhdr = skb_network_header(skb);
2538 		*csum_encapped = false;
2539 		if (spec->no_not_encapped)
2540 			goto need_help;
2541 	} else if (skb->encapsulation && spec->encap_okay &&
2542 		   skb_checksum_start_offset(skb) ==
2543 		   skb_inner_transport_offset(skb)) {
2544 		/* Encapsulated checksum */
2545 		*csum_encapped = true;
2546 		switch (skb->inner_protocol_type) {
2547 		case ENCAP_TYPE_ETHER:
2548 			protocol = eproto_to_ipproto(skb->inner_protocol);
2549 			break;
2550 		case ENCAP_TYPE_IPPROTO:
2551 			protocol = skb->inner_protocol;
2552 			break;
2553 		}
2554 		nhdr = skb_inner_network_header(skb);
2555 	} else {
2556 		goto need_help;
2557 	}
2558 
2559 	switch (protocol) {
2560 	case IPPROTO_IP:
2561 		if (!spec->ipv4_okay)
2562 			goto need_help;
2563 		iph = nhdr;
2564 		ip_proto = iph->protocol;
2565 		if (iph->ihl != 5 && !spec->ip_options_okay)
2566 			goto need_help;
2567 		break;
2568 	case IPPROTO_IPV6:
2569 		if (!spec->ipv6_okay)
2570 			goto need_help;
2571 		if (spec->no_encapped_ipv6 && *csum_encapped)
2572 			goto need_help;
2573 		ipv6 = nhdr;
2574 		nhdr += sizeof(*ipv6);
2575 		ip_proto = ipv6->nexthdr;
2576 		break;
2577 	default:
2578 		goto need_help;
2579 	}
2580 
2581 ip_proto_again:
2582 	switch (ip_proto) {
2583 	case IPPROTO_TCP:
2584 		if (!spec->tcp_okay ||
2585 		    skb->csum_offset != offsetof(struct tcphdr, check))
2586 			goto need_help;
2587 		break;
2588 	case IPPROTO_UDP:
2589 		if (!spec->udp_okay ||
2590 		    skb->csum_offset != offsetof(struct udphdr, check))
2591 			goto need_help;
2592 		break;
2593 	case IPPROTO_SCTP:
2594 		if (!spec->sctp_okay ||
2595 		    skb->csum_offset != offsetof(struct sctphdr, checksum))
2596 			goto cant_help;
2597 		break;
2598 	case NEXTHDR_HOP:
2599 	case NEXTHDR_ROUTING:
2600 	case NEXTHDR_DEST: {
2601 		u8 *opthdr = nhdr;
2602 
2603 		if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
2604 			goto need_help;
2605 
2606 		ip_proto = opthdr[0];
2607 		nhdr += (opthdr[1] + 1) << 3;
2608 
2609 		goto ip_proto_again;
2610 	}
2611 	default:
2612 		goto need_help;
2613 	}
2614 
2615 	/* Passed the tests for offloading checksum */
2616 	return true;
2617 
2618 need_help:
2619 	if (csum_help && !skb_shinfo(skb)->gso_size)
2620 		skb_checksum_help(skb);
2621 cant_help:
2622 	return false;
2623 }
2624 EXPORT_SYMBOL(__skb_csum_offload_chk);
2625 
2626 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2627 {
2628 	__be16 type = skb->protocol;
2629 
2630 	/* Tunnel gso handlers can set protocol to ethernet. */
2631 	if (type == htons(ETH_P_TEB)) {
2632 		struct ethhdr *eth;
2633 
2634 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2635 			return 0;
2636 
2637 		eth = (struct ethhdr *)skb_mac_header(skb);
2638 		type = eth->h_proto;
2639 	}
2640 
2641 	return __vlan_get_protocol(skb, type, depth);
2642 }
2643 
2644 /**
2645  *	skb_mac_gso_segment - mac layer segmentation handler.
2646  *	@skb: buffer to segment
2647  *	@features: features for the output path (see dev->features)
2648  */
2649 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2650 				    netdev_features_t features)
2651 {
2652 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2653 	struct packet_offload *ptype;
2654 	int vlan_depth = skb->mac_len;
2655 	__be16 type = skb_network_protocol(skb, &vlan_depth);
2656 
2657 	if (unlikely(!type))
2658 		return ERR_PTR(-EINVAL);
2659 
2660 	__skb_pull(skb, vlan_depth);
2661 
2662 	rcu_read_lock();
2663 	list_for_each_entry_rcu(ptype, &offload_base, list) {
2664 		if (ptype->type == type && ptype->callbacks.gso_segment) {
2665 			segs = ptype->callbacks.gso_segment(skb, features);
2666 			break;
2667 		}
2668 	}
2669 	rcu_read_unlock();
2670 
2671 	__skb_push(skb, skb->data - skb_mac_header(skb));
2672 
2673 	return segs;
2674 }
2675 EXPORT_SYMBOL(skb_mac_gso_segment);
2676 
2677 
2678 /* openvswitch calls this on rx path, so we need a different check.
2679  */
2680 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2681 {
2682 	if (tx_path)
2683 		return skb->ip_summed != CHECKSUM_PARTIAL;
2684 	else
2685 		return skb->ip_summed == CHECKSUM_NONE;
2686 }
2687 
2688 /**
2689  *	__skb_gso_segment - Perform segmentation on skb.
2690  *	@skb: buffer to segment
2691  *	@features: features for the output path (see dev->features)
2692  *	@tx_path: whether it is called in TX path
2693  *
2694  *	This function segments the given skb and returns a list of segments.
2695  *
2696  *	It may return NULL if the skb requires no segmentation.  This is
2697  *	only possible when GSO is used for verifying header integrity.
2698  *
2699  *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2700  */
2701 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2702 				  netdev_features_t features, bool tx_path)
2703 {
2704 	if (unlikely(skb_needs_check(skb, tx_path))) {
2705 		int err;
2706 
2707 		skb_warn_bad_offload(skb);
2708 
2709 		err = skb_cow_head(skb, 0);
2710 		if (err < 0)
2711 			return ERR_PTR(err);
2712 	}
2713 
2714 	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2715 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2716 
2717 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2718 	SKB_GSO_CB(skb)->encap_level = 0;
2719 
2720 	skb_reset_mac_header(skb);
2721 	skb_reset_mac_len(skb);
2722 
2723 	return skb_mac_gso_segment(skb, features);
2724 }
2725 EXPORT_SYMBOL(__skb_gso_segment);
2726 
2727 /* Take action when hardware reception checksum errors are detected. */
2728 #ifdef CONFIG_BUG
2729 void netdev_rx_csum_fault(struct net_device *dev)
2730 {
2731 	if (net_ratelimit()) {
2732 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2733 		dump_stack();
2734 	}
2735 }
2736 EXPORT_SYMBOL(netdev_rx_csum_fault);
2737 #endif
2738 
2739 /* Actually, we should eliminate this check as soon as we know that:
2740  * 1. An IOMMU is present and is able to map all the memory.
2741  * 2. No high memory really exists on this machine.
2742  */
2743 
2744 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2745 {
2746 #ifdef CONFIG_HIGHMEM
2747 	int i;
2748 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2749 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2750 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2751 			if (PageHighMem(skb_frag_page(frag)))
2752 				return 1;
2753 		}
2754 	}
2755 
2756 	if (PCI_DMA_BUS_IS_PHYS) {
2757 		struct device *pdev = dev->dev.parent;
2758 
2759 		if (!pdev)
2760 			return 0;
2761 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2762 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2763 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2764 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2765 				return 1;
2766 		}
2767 	}
2768 #endif
2769 	return 0;
2770 }
2771 
2772 /* If this is an MPLS offload request, verify we are testing hardware
2773  * MPLS features instead of standard features for the netdev.
2774  */
2775 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2776 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2777 					   netdev_features_t features,
2778 					   __be16 type)
2779 {
2780 	if (eth_p_mpls(type))
2781 		features &= skb->dev->mpls_features;
2782 
2783 	return features;
2784 }
2785 #else
2786 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2787 					   netdev_features_t features,
2788 					   __be16 type)
2789 {
2790 	return features;
2791 }
2792 #endif
2793 
2794 static netdev_features_t harmonize_features(struct sk_buff *skb,
2795 	netdev_features_t features)
2796 {
2797 	int tmp;
2798 	__be16 type;
2799 
2800 	type = skb_network_protocol(skb, &tmp);
2801 	features = net_mpls_features(skb, features, type);
2802 
2803 	if (skb->ip_summed != CHECKSUM_NONE &&
2804 	    !can_checksum_protocol(features, type)) {
2805 		features &= ~NETIF_F_CSUM_MASK;
2806 	} else if (illegal_highdma(skb->dev, skb)) {
2807 		features &= ~NETIF_F_SG;
2808 	}
2809 
2810 	return features;
2811 }
2812 
2813 netdev_features_t passthru_features_check(struct sk_buff *skb,
2814 					  struct net_device *dev,
2815 					  netdev_features_t features)
2816 {
2817 	return features;
2818 }
2819 EXPORT_SYMBOL(passthru_features_check);
2820 
2821 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2822 					     struct net_device *dev,
2823 					     netdev_features_t features)
2824 {
2825 	return vlan_features_check(skb, features);
2826 }
2827 
2828 netdev_features_t netif_skb_features(struct sk_buff *skb)
2829 {
2830 	struct net_device *dev = skb->dev;
2831 	netdev_features_t features = dev->features;
2832 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
2833 
2834 	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2835 		features &= ~NETIF_F_GSO_MASK;
2836 
2837 	/* If this is an encapsulation offload request, verify we are testing
2838 	 * hardware encapsulation features instead of standard
2839 	 * features for the netdev.
2840 	 */
2841 	if (skb->encapsulation)
2842 		features &= dev->hw_enc_features;
2843 
2844 	if (skb_vlan_tagged(skb))
2845 		features = netdev_intersect_features(features,
2846 						     dev->vlan_features |
2847 						     NETIF_F_HW_VLAN_CTAG_TX |
2848 						     NETIF_F_HW_VLAN_STAG_TX);
2849 
2850 	if (dev->netdev_ops->ndo_features_check)
2851 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
2852 								features);
2853 	else
2854 		features &= dflt_features_check(skb, dev, features);
2855 
2856 	return harmonize_features(skb, features);
2857 }
2858 EXPORT_SYMBOL(netif_skb_features);
2859 
2860 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2861 		    struct netdev_queue *txq, bool more)
2862 {
2863 	unsigned int len;
2864 	int rc;
2865 
2866 	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2867 		dev_queue_xmit_nit(skb, dev);
2868 
2869 	len = skb->len;
2870 	trace_net_dev_start_xmit(skb, dev);
2871 	rc = netdev_start_xmit(skb, dev, txq, more);
2872 	trace_net_dev_xmit(skb, rc, dev, len);
2873 
2874 	return rc;
2875 }
2876 
2877 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2878 				    struct netdev_queue *txq, int *ret)
2879 {
2880 	struct sk_buff *skb = first;
2881 	int rc = NETDEV_TX_OK;
2882 
2883 	while (skb) {
2884 		struct sk_buff *next = skb->next;
2885 
2886 		skb->next = NULL;
2887 		rc = xmit_one(skb, dev, txq, next != NULL);
2888 		if (unlikely(!dev_xmit_complete(rc))) {
2889 			skb->next = next;
2890 			goto out;
2891 		}
2892 
2893 		skb = next;
2894 		if (netif_xmit_stopped(txq) && skb) {
2895 			rc = NETDEV_TX_BUSY;
2896 			break;
2897 		}
2898 	}
2899 
2900 out:
2901 	*ret = rc;
2902 	return skb;
2903 }
2904 
2905 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2906 					  netdev_features_t features)
2907 {
2908 	if (skb_vlan_tag_present(skb) &&
2909 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
2910 		skb = __vlan_hwaccel_push_inside(skb);
2911 	return skb;
2912 }
2913 
2914 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2915 {
2916 	netdev_features_t features;
2917 
2918 	if (skb->next)
2919 		return skb;
2920 
2921 	features = netif_skb_features(skb);
2922 	skb = validate_xmit_vlan(skb, features);
2923 	if (unlikely(!skb))
2924 		goto out_null;
2925 
2926 	if (netif_needs_gso(skb, features)) {
2927 		struct sk_buff *segs;
2928 
2929 		segs = skb_gso_segment(skb, features);
2930 		if (IS_ERR(segs)) {
2931 			goto out_kfree_skb;
2932 		} else if (segs) {
2933 			consume_skb(skb);
2934 			skb = segs;
2935 		}
2936 	} else {
2937 		if (skb_needs_linearize(skb, features) &&
2938 		    __skb_linearize(skb))
2939 			goto out_kfree_skb;
2940 
2941 		/* If packet is not checksummed and device does not
2942 		 * support checksumming for this protocol, complete
2943 		 * checksumming here.
2944 		 */
2945 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2946 			if (skb->encapsulation)
2947 				skb_set_inner_transport_header(skb,
2948 							       skb_checksum_start_offset(skb));
2949 			else
2950 				skb_set_transport_header(skb,
2951 							 skb_checksum_start_offset(skb));
2952 			if (!(features & NETIF_F_CSUM_MASK) &&
2953 			    skb_checksum_help(skb))
2954 				goto out_kfree_skb;
2955 		}
2956 	}
2957 
2958 	return skb;
2959 
2960 out_kfree_skb:
2961 	kfree_skb(skb);
2962 out_null:
2963 	return NULL;
2964 }
2965 
2966 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2967 {
2968 	struct sk_buff *next, *head = NULL, *tail;
2969 
2970 	for (; skb != NULL; skb = next) {
2971 		next = skb->next;
2972 		skb->next = NULL;
2973 
2974 		/* in case the skb won't be segmented, point it to itself */
2975 		skb->prev = skb;
2976 
2977 		skb = validate_xmit_skb(skb, dev);
2978 		if (!skb)
2979 			continue;
2980 
2981 		if (!head)
2982 			head = skb;
2983 		else
2984 			tail->next = skb;
2985 		/* If skb was segmented, skb->prev points to
2986 		 * the last segment. If not, it still contains skb.
2987 		 */
2988 		tail = skb->prev;
2989 	}
2990 	return head;
2991 }
2992 
2993 static void qdisc_pkt_len_init(struct sk_buff *skb)
2994 {
2995 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2996 
2997 	qdisc_skb_cb(skb)->pkt_len = skb->len;
2998 
2999 	/* To get a more precise estimation of bytes sent on the wire,
3000 	 * we add to pkt_len the header size of all segments
3001 	 */
3002 	if (shinfo->gso_size)  {
3003 		unsigned int hdr_len;
3004 		u16 gso_segs = shinfo->gso_segs;
3005 
3006 		/* mac layer + network layer */
3007 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3008 
3009 		/* + transport layer */
3010 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3011 			hdr_len += tcp_hdrlen(skb);
3012 		else
3013 			hdr_len += sizeof(struct udphdr);
3014 
3015 		if (shinfo->gso_type & SKB_GSO_DODGY)
3016 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3017 						shinfo->gso_size);
3018 
3019 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3020 	}
3021 }
3022 
3023 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3024 				 struct net_device *dev,
3025 				 struct netdev_queue *txq)
3026 {
3027 	spinlock_t *root_lock = qdisc_lock(q);
3028 	bool contended;
3029 	int rc;
3030 
3031 	qdisc_calculate_pkt_len(skb, q);
3032 	/*
3033 	 * Heuristic to force contended enqueues to serialize on a
3034 	 * separate lock before trying to get the qdisc main lock.
3035 	 * This permits the __QDISC___STATE_RUNNING owner to get the lock more
3036 	 * often and dequeue packets faster.
3037 	 */
3038 	contended = qdisc_is_running(q);
3039 	if (unlikely(contended))
3040 		spin_lock(&q->busylock);
3041 
3042 	spin_lock(root_lock);
3043 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3044 		kfree_skb(skb);
3045 		rc = NET_XMIT_DROP;
3046 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3047 		   qdisc_run_begin(q)) {
3048 		/*
3049 		 * This is a work-conserving queue; there are no old skbs
3050 		 * waiting to be sent out; and the qdisc is not running -
3051 		 * xmit the skb directly.
3052 		 */
3053 
3054 		qdisc_bstats_update(q, skb);
3055 
3056 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3057 			if (unlikely(contended)) {
3058 				spin_unlock(&q->busylock);
3059 				contended = false;
3060 			}
3061 			__qdisc_run(q);
3062 		} else
3063 			qdisc_run_end(q);
3064 
3065 		rc = NET_XMIT_SUCCESS;
3066 	} else {
3067 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
3068 		if (qdisc_run_begin(q)) {
3069 			if (unlikely(contended)) {
3070 				spin_unlock(&q->busylock);
3071 				contended = false;
3072 			}
3073 			__qdisc_run(q);
3074 		}
3075 	}
3076 	spin_unlock(root_lock);
3077 	if (unlikely(contended))
3078 		spin_unlock(&q->busylock);
3079 	return rc;
3080 }
3081 
3082 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3083 static void skb_update_prio(struct sk_buff *skb)
3084 {
3085 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
3086 
3087 	if (!skb->priority && skb->sk && map) {
3088 		unsigned int prioidx =
3089 			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
3090 
3091 		if (prioidx < map->priomap_len)
3092 			skb->priority = map->priomap[prioidx];
3093 	}
3094 }
3095 #else
3096 #define skb_update_prio(skb)
3097 #endif
3098 
3099 DEFINE_PER_CPU(int, xmit_recursion);
3100 EXPORT_SYMBOL(xmit_recursion);
3101 
3102 #define RECURSION_LIMIT 10
3103 
3104 /**
3105  *	dev_loopback_xmit - loop back @skb
3106  *	@net: network namespace this loopback is happening in
3107  *	@sk:  sk needed to be a netfilter okfn
3108  *	@skb: buffer to transmit
3109  */
3110 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3111 {
3112 	skb_reset_mac_header(skb);
3113 	__skb_pull(skb, skb_network_offset(skb));
3114 	skb->pkt_type = PACKET_LOOPBACK;
3115 	skb->ip_summed = CHECKSUM_UNNECESSARY;
3116 	WARN_ON(!skb_dst(skb));
3117 	skb_dst_force(skb);
3118 	netif_rx_ni(skb);
3119 	return 0;
3120 }
3121 EXPORT_SYMBOL(dev_loopback_xmit);
3122 
3123 #ifdef CONFIG_NET_EGRESS
3124 static struct sk_buff *
3125 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3126 {
3127 	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3128 	struct tcf_result cl_res;
3129 
3130 	if (!cl)
3131 		return skb;
3132 
3133 	/* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
3134 	 * earlier by the caller.
3135 	 */
3136 	qdisc_bstats_cpu_update(cl->q, skb);
3137 
3138 	switch (tc_classify(skb, cl, &cl_res, false)) {
3139 	case TC_ACT_OK:
3140 	case TC_ACT_RECLASSIFY:
3141 		skb->tc_index = TC_H_MIN(cl_res.classid);
3142 		break;
3143 	case TC_ACT_SHOT:
3144 		qdisc_qstats_cpu_drop(cl->q);
3145 		*ret = NET_XMIT_DROP;
3146 		goto drop;
3147 	case TC_ACT_STOLEN:
3148 	case TC_ACT_QUEUED:
3149 		*ret = NET_XMIT_SUCCESS;
3150 drop:
3151 		kfree_skb(skb);
3152 		return NULL;
3153 	case TC_ACT_REDIRECT:
3154 		/* No need to push/pop skb's mac_header here on egress! */
3155 		skb_do_redirect(skb);
3156 		*ret = NET_XMIT_SUCCESS;
3157 		return NULL;
3158 	default:
3159 		break;
3160 	}
3161 
3162 	return skb;
3163 }
3164 #endif /* CONFIG_NET_EGRESS */
3165 
3166 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3167 {
3168 #ifdef CONFIG_XPS
3169 	struct xps_dev_maps *dev_maps;
3170 	struct xps_map *map;
3171 	int queue_index = -1;
3172 
3173 	rcu_read_lock();
3174 	dev_maps = rcu_dereference(dev->xps_maps);
3175 	if (dev_maps) {
3176 		map = rcu_dereference(
3177 		    dev_maps->cpu_map[skb->sender_cpu - 1]);
3178 		if (map) {
3179 			if (map->len == 1)
3180 				queue_index = map->queues[0];
3181 			else
3182 				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3183 									   map->len)];
3184 			if (unlikely(queue_index >= dev->real_num_tx_queues))
3185 				queue_index = -1;
3186 		}
3187 	}
3188 	rcu_read_unlock();
3189 
3190 	return queue_index;
3191 #else
3192 	return -1;
3193 #endif
3194 }
3195 
3196 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3197 {
3198 	struct sock *sk = skb->sk;
3199 	int queue_index = sk_tx_queue_get(sk);
3200 
3201 	if (queue_index < 0 || skb->ooo_okay ||
3202 	    queue_index >= dev->real_num_tx_queues) {
3203 		int new_index = get_xps_queue(dev, skb);
3204 		if (new_index < 0)
3205 			new_index = skb_tx_hash(dev, skb);
3206 
3207 		if (queue_index != new_index && sk &&
3208 		    sk_fullsock(sk) &&
3209 		    rcu_access_pointer(sk->sk_dst_cache))
3210 			sk_tx_queue_set(sk, new_index);
3211 
3212 		queue_index = new_index;
3213 	}
3214 
3215 	return queue_index;
3216 }
3217 
3218 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3219 				    struct sk_buff *skb,
3220 				    void *accel_priv)
3221 {
3222 	int queue_index = 0;
3223 
3224 #ifdef CONFIG_XPS
3225 	u32 sender_cpu = skb->sender_cpu - 1;
3226 
3227 	if (sender_cpu >= (u32)NR_CPUS)
3228 		skb->sender_cpu = raw_smp_processor_id() + 1;
3229 #endif
3230 
3231 	if (dev->real_num_tx_queues != 1) {
3232 		const struct net_device_ops *ops = dev->netdev_ops;
3233 		if (ops->ndo_select_queue)
3234 			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3235 							    __netdev_pick_tx);
3236 		else
3237 			queue_index = __netdev_pick_tx(dev, skb);
3238 
3239 		if (!accel_priv)
3240 			queue_index = netdev_cap_txqueue(dev, queue_index);
3241 	}
3242 
3243 	skb_set_queue_mapping(skb, queue_index);
3244 	return netdev_get_tx_queue(dev, queue_index);
3245 }
3246 
3247 /**
3248  *	__dev_queue_xmit - transmit a buffer
3249  *	@skb: buffer to transmit
3250  *	@accel_priv: private data used for L2 forwarding offload
3251  *
3252  *	Queue a buffer for transmission to a network device. The caller must
3253  *	have set the device and priority and built the buffer before calling
3254  *	this function. The function can be called from an interrupt.
3255  *
3256  *	A negative errno code is returned on a failure. A success does not
3257  *	guarantee the frame will be transmitted as it may be dropped due
3258  *	to congestion or traffic shaping.
3259  *
3260  * -----------------------------------------------------------------------------------
3261  *      I notice this method can also return errors from the queue disciplines,
3262  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
3263  *      be positive.
3264  *
3265  *      Regardless of the return value, the skb is consumed, so it is currently
3266  *      difficult to retry a send to this method.  (You can bump the ref count
3267  *      before sending to hold a reference for retry if you are careful.)
3268  *
3269  *      When calling this method, interrupts MUST be enabled.  This is because
3270  *      the BH enable code must have IRQs enabled so that it will not deadlock.
3271  *          --BLG
3272  */
3273 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3274 {
3275 	struct net_device *dev = skb->dev;
3276 	struct netdev_queue *txq;
3277 	struct Qdisc *q;
3278 	int rc = -ENOMEM;
3279 
3280 	skb_reset_mac_header(skb);
3281 
3282 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3283 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3284 
3285 	/* Disable soft irqs for various locks below. Also
3286 	 * stops preemption for RCU.
3287 	 */
3288 	rcu_read_lock_bh();
3289 
3290 	skb_update_prio(skb);
3291 
3292 	qdisc_pkt_len_init(skb);
3293 #ifdef CONFIG_NET_CLS_ACT
3294 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3295 # ifdef CONFIG_NET_EGRESS
3296 	if (static_key_false(&egress_needed)) {
3297 		skb = sch_handle_egress(skb, &rc, dev);
3298 		if (!skb)
3299 			goto out;
3300 	}
3301 # endif
3302 #endif
3303 	/* If device/qdisc don't need skb->dst, release it right now while
3304 	 * it's hot in this cpu cache.
3305 	 */
3306 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3307 		skb_dst_drop(skb);
3308 	else
3309 		skb_dst_force(skb);
3310 
3311 #ifdef CONFIG_NET_SWITCHDEV
3312 	/* Don't forward if the offload device already forwarded it */
3313 	if (skb->offload_fwd_mark &&
3314 	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
3315 		consume_skb(skb);
3316 		rc = NET_XMIT_SUCCESS;
3317 		goto out;
3318 	}
3319 #endif
3320 
3321 	txq = netdev_pick_tx(dev, skb, accel_priv);
3322 	q = rcu_dereference_bh(txq->qdisc);
3323 
3324 	trace_net_dev_queue(skb);
3325 	if (q->enqueue) {
3326 		rc = __dev_xmit_skb(skb, q, dev, txq);
3327 		goto out;
3328 	}
3329 
3330 	/* The device has no queue. Common case for software devices:
3331 	   loopback, all the sorts of tunnels...
3332 
3333 	   Really, it is unlikely that netif_tx_lock protection is necessary
3334 	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
3335 	   counters.)
3336 	   However, it is possible that they rely on protection
3337 	   made by us here.
3338 
3339 	   Check this and shot the lock. It is not prone to deadlocks.
3340 	   Either shot noqueue qdisc, it is even simpler 8)
3341 	 */
3342 	if (dev->flags & IFF_UP) {
3343 		int cpu = smp_processor_id(); /* ok because BHs are off */
3344 
3345 		if (txq->xmit_lock_owner != cpu) {
3346 
3347 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3348 				goto recursion_alert;
3349 
3350 			skb = validate_xmit_skb(skb, dev);
3351 			if (!skb)
3352 				goto drop;
3353 
3354 			HARD_TX_LOCK(dev, txq, cpu);
3355 
3356 			if (!netif_xmit_stopped(txq)) {
3357 				__this_cpu_inc(xmit_recursion);
3358 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3359 				__this_cpu_dec(xmit_recursion);
3360 				if (dev_xmit_complete(rc)) {
3361 					HARD_TX_UNLOCK(dev, txq);
3362 					goto out;
3363 				}
3364 			}
3365 			HARD_TX_UNLOCK(dev, txq);
3366 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3367 					     dev->name);
3368 		} else {
3369 			/* Recursion is detected! It is possible,
3370 			 * unfortunately
3371 			 */
3372 recursion_alert:
3373 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3374 					     dev->name);
3375 		}
3376 	}
3377 
3378 	rc = -ENETDOWN;
3379 drop:
3380 	rcu_read_unlock_bh();
3381 
3382 	atomic_long_inc(&dev->tx_dropped);
3383 	kfree_skb_list(skb);
3384 	return rc;
3385 out:
3386 	rcu_read_unlock_bh();
3387 	return rc;
3388 }
3389 
3390 int dev_queue_xmit(struct sk_buff *skb)
3391 {
3392 	return __dev_queue_xmit(skb, NULL);
3393 }
3394 EXPORT_SYMBOL(dev_queue_xmit);
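
/* A minimal usage sketch (illustrative only): transmitting a frame that the
 * caller has already built, as described in the comment above. The skb is
 * consumed whatever the return value, so it must not be reused afterwards.
 */
#if 0
static int example_send(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->priority = TC_PRIO_CONTROL;

	/* May return a negative errno or a positive NET_XMIT_* code. */
	return dev_queue_xmit(skb);
}
#endif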
3395 
3396 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3397 {
3398 	return __dev_queue_xmit(skb, accel_priv);
3399 }
3400 EXPORT_SYMBOL(dev_queue_xmit_accel);
3401 
3402 
3403 /*=======================================================================
3404 			Receiver routines
3405   =======================================================================*/
3406 
3407 int netdev_max_backlog __read_mostly = 1000;
3408 EXPORT_SYMBOL(netdev_max_backlog);
3409 
3410 int netdev_tstamp_prequeue __read_mostly = 1;
3411 int netdev_budget __read_mostly = 300;
3412 int weight_p __read_mostly = 64;            /* old backlog weight */
3413 
3414 /* Called with irq disabled */
3415 static inline void ____napi_schedule(struct softnet_data *sd,
3416 				     struct napi_struct *napi)
3417 {
3418 	list_add_tail(&napi->poll_list, &sd->poll_list);
3419 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3420 }
3421 
3422 #ifdef CONFIG_RPS
3423 
3424 /* One global table that all flow-based protocols share. */
3425 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3426 EXPORT_SYMBOL(rps_sock_flow_table);
3427 u32 rps_cpu_mask __read_mostly;
3428 EXPORT_SYMBOL(rps_cpu_mask);
3429 
3430 struct static_key rps_needed __read_mostly;
3431 
3432 static struct rps_dev_flow *
3433 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3434 	    struct rps_dev_flow *rflow, u16 next_cpu)
3435 {
3436 	if (next_cpu < nr_cpu_ids) {
3437 #ifdef CONFIG_RFS_ACCEL
3438 		struct netdev_rx_queue *rxqueue;
3439 		struct rps_dev_flow_table *flow_table;
3440 		struct rps_dev_flow *old_rflow;
3441 		u32 flow_id;
3442 		u16 rxq_index;
3443 		int rc;
3444 
3445 		/* Should we steer this flow to a different hardware queue? */
3446 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3447 		    !(dev->features & NETIF_F_NTUPLE))
3448 			goto out;
3449 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3450 		if (rxq_index == skb_get_rx_queue(skb))
3451 			goto out;
3452 
3453 		rxqueue = dev->_rx + rxq_index;
3454 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
3455 		if (!flow_table)
3456 			goto out;
3457 		flow_id = skb_get_hash(skb) & flow_table->mask;
3458 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3459 							rxq_index, flow_id);
3460 		if (rc < 0)
3461 			goto out;
3462 		old_rflow = rflow;
3463 		rflow = &flow_table->flows[flow_id];
3464 		rflow->filter = rc;
3465 		if (old_rflow->filter == rflow->filter)
3466 			old_rflow->filter = RPS_NO_FILTER;
3467 	out:
3468 #endif
3469 		rflow->last_qtail =
3470 			per_cpu(softnet_data, next_cpu).input_queue_head;
3471 	}
3472 
3473 	rflow->cpu = next_cpu;
3474 	return rflow;
3475 }
3476 
3477 /*
3478  * get_rps_cpu is called from netif_receive_skb and returns the target
3479  * CPU from the RPS map of the receiving queue for a given skb.
3480  * rcu_read_lock must be held on entry.
3481  */
3482 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483 		       struct rps_dev_flow **rflowp)
3484 {
3485 	const struct rps_sock_flow_table *sock_flow_table;
3486 	struct netdev_rx_queue *rxqueue = dev->_rx;
3487 	struct rps_dev_flow_table *flow_table;
3488 	struct rps_map *map;
3489 	int cpu = -1;
3490 	u32 tcpu;
3491 	u32 hash;
3492 
3493 	if (skb_rx_queue_recorded(skb)) {
3494 		u16 index = skb_get_rx_queue(skb);
3495 
3496 		if (unlikely(index >= dev->real_num_rx_queues)) {
3497 			WARN_ONCE(dev->real_num_rx_queues > 1,
3498 				  "%s received packet on queue %u, but number "
3499 				  "of RX queues is %u\n",
3500 				  dev->name, index, dev->real_num_rx_queues);
3501 			goto done;
3502 		}
3503 		rxqueue += index;
3504 	}
3505 
3506 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3507 
3508 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
3509 	map = rcu_dereference(rxqueue->rps_map);
3510 	if (!flow_table && !map)
3511 		goto done;
3512 
3513 	skb_reset_network_header(skb);
3514 	hash = skb_get_hash(skb);
3515 	if (!hash)
3516 		goto done;
3517 
3518 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
3519 	if (flow_table && sock_flow_table) {
3520 		struct rps_dev_flow *rflow;
3521 		u32 next_cpu;
3522 		u32 ident;
3523 
3524 		/* First check into global flow table if there is a match */
3525 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3526 		if ((ident ^ hash) & ~rps_cpu_mask)
3527 			goto try_rps;
3528 
3529 		next_cpu = ident & rps_cpu_mask;
3530 
3531 		/* OK, now we know there is a match,
3532 		 * we can look at the local (per receive queue) flow table
3533 		 */
3534 		rflow = &flow_table->flows[hash & flow_table->mask];
3535 		tcpu = rflow->cpu;
3536 
3537 		/*
3538 		 * If the desired CPU (where last recvmsg was done) is
3539 		 * different from current CPU (one in the rx-queue flow
3540 		 * table entry), switch if one of the following holds:
3541 		 *   - Current CPU is unset (>= nr_cpu_ids).
3542 		 *   - Current CPU is offline.
3543 		 *   - The current CPU's queue tail has advanced beyond the
3544 		 *     last packet that was enqueued using this table entry.
3545 		 *     This guarantees that all previous packets for the flow
3546 		 *     have been dequeued, thus preserving in order delivery.
3547 		 */
3548 		if (unlikely(tcpu != next_cpu) &&
3549 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3550 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3551 		      rflow->last_qtail)) >= 0)) {
3552 			tcpu = next_cpu;
3553 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3554 		}
3555 
3556 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3557 			*rflowp = rflow;
3558 			cpu = tcpu;
3559 			goto done;
3560 		}
3561 	}
3562 
3563 try_rps:
3564 
3565 	if (map) {
3566 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3567 		if (cpu_online(tcpu)) {
3568 			cpu = tcpu;
3569 			goto done;
3570 		}
3571 	}
3572 
3573 done:
3574 	return cpu;
3575 }
3576 
3577 #ifdef CONFIG_RFS_ACCEL
3578 
3579 /**
3580  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3581  * @dev: Device on which the filter was set
3582  * @rxq_index: RX queue index
3583  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3584  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3585  *
3586  * Drivers that implement ndo_rx_flow_steer() should periodically call
3587  * this function for each installed filter and remove the filters for
3588  * which it returns %true.
3589  */
3590 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3591 			 u32 flow_id, u16 filter_id)
3592 {
3593 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3594 	struct rps_dev_flow_table *flow_table;
3595 	struct rps_dev_flow *rflow;
3596 	bool expire = true;
3597 	unsigned int cpu;
3598 
3599 	rcu_read_lock();
3600 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
3601 	if (flow_table && flow_id <= flow_table->mask) {
3602 		rflow = &flow_table->flows[flow_id];
3603 		cpu = ACCESS_ONCE(rflow->cpu);
3604 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3605 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3606 			   rflow->last_qtail) <
3607 		     (int)(10 * flow_table->mask)))
3608 			expire = false;
3609 	}
3610 	rcu_read_unlock();
3611 	return expire;
3612 }
3613 EXPORT_SYMBOL(rps_may_expire_flow);
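
/* A minimal usage sketch (illustrative only) of the periodic scan described
 * above. The filter table layout and "example_remove_filter" helper are
 * assumptions standing in for driver-private state, not a real API.
 */
#if 0
static void example_expire_rfs_filters(struct net_device *dev, u16 rxq_index,
				       struct example_rfs_filter *tbl,
				       unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].installed)
			continue;
		if (rps_may_expire_flow(dev, rxq_index, tbl[i].flow_id, i)) {
			example_remove_filter(dev, rxq_index, i); /* hypothetical */
			tbl[i].installed = false;
		}
	}
}
#endif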
3614 
3615 #endif /* CONFIG_RFS_ACCEL */
3616 
3617 /* Called from hardirq (IPI) context */
3618 static void rps_trigger_softirq(void *data)
3619 {
3620 	struct softnet_data *sd = data;
3621 
3622 	____napi_schedule(sd, &sd->backlog);
3623 	sd->received_rps++;
3624 }
3625 
3626 #endif /* CONFIG_RPS */
3627 
3628 /*
3629  * Check if this softnet_data structure belongs to another CPU.
3630  * If yes, queue it to our IPI list and return 1.
3631  * If no, return 0.
3632  */
3633 static int rps_ipi_queued(struct softnet_data *sd)
3634 {
3635 #ifdef CONFIG_RPS
3636 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3637 
3638 	if (sd != mysd) {
3639 		sd->rps_ipi_next = mysd->rps_ipi_list;
3640 		mysd->rps_ipi_list = sd;
3641 
3642 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3643 		return 1;
3644 	}
3645 #endif /* CONFIG_RPS */
3646 	return 0;
3647 }
3648 
3649 #ifdef CONFIG_NET_FLOW_LIMIT
3650 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3651 #endif
3652 
3653 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3654 {
3655 #ifdef CONFIG_NET_FLOW_LIMIT
3656 	struct sd_flow_limit *fl;
3657 	struct softnet_data *sd;
3658 	unsigned int old_flow, new_flow;
3659 
3660 	if (qlen < (netdev_max_backlog >> 1))
3661 		return false;
3662 
3663 	sd = this_cpu_ptr(&softnet_data);
3664 
3665 	rcu_read_lock();
3666 	fl = rcu_dereference(sd->flow_limit);
3667 	if (fl) {
3668 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3669 		old_flow = fl->history[fl->history_head];
3670 		fl->history[fl->history_head] = new_flow;
3671 
3672 		fl->history_head++;
3673 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3674 
3675 		if (likely(fl->buckets[old_flow]))
3676 			fl->buckets[old_flow]--;
3677 
3678 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3679 			fl->count++;
3680 			rcu_read_unlock();
3681 			return true;
3682 		}
3683 	}
3684 	rcu_read_unlock();
3685 #endif
3686 	return false;
3687 }
3688 
3689 /*
3690  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3691  * queue (may be a remote CPU queue).
3692  */
3693 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3694 			      unsigned int *qtail)
3695 {
3696 	struct softnet_data *sd;
3697 	unsigned long flags;
3698 	unsigned int qlen;
3699 
3700 	sd = &per_cpu(softnet_data, cpu);
3701 
3702 	local_irq_save(flags);
3703 
3704 	rps_lock(sd);
3705 	if (!netif_running(skb->dev))
3706 		goto drop;
3707 	qlen = skb_queue_len(&sd->input_pkt_queue);
3708 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3709 		if (qlen) {
3710 enqueue:
3711 			__skb_queue_tail(&sd->input_pkt_queue, skb);
3712 			input_queue_tail_incr_save(sd, qtail);
3713 			rps_unlock(sd);
3714 			local_irq_restore(flags);
3715 			return NET_RX_SUCCESS;
3716 		}
3717 
3718 		/* Schedule NAPI for backlog device
3719 		 * We can use a non-atomic operation since we own the queue lock
3720 		 */
3721 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3722 			if (!rps_ipi_queued(sd))
3723 				____napi_schedule(sd, &sd->backlog);
3724 		}
3725 		goto enqueue;
3726 	}
3727 
3728 drop:
3729 	sd->dropped++;
3730 	rps_unlock(sd);
3731 
3732 	local_irq_restore(flags);
3733 
3734 	atomic_long_inc(&skb->dev->rx_dropped);
3735 	kfree_skb(skb);
3736 	return NET_RX_DROP;
3737 }
3738 
3739 static int netif_rx_internal(struct sk_buff *skb)
3740 {
3741 	int ret;
3742 
3743 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3744 
3745 	trace_netif_rx(skb);
3746 #ifdef CONFIG_RPS
3747 	if (static_key_false(&rps_needed)) {
3748 		struct rps_dev_flow voidflow, *rflow = &voidflow;
3749 		int cpu;
3750 
3751 		preempt_disable();
3752 		rcu_read_lock();
3753 
3754 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
3755 		if (cpu < 0)
3756 			cpu = smp_processor_id();
3757 
3758 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3759 
3760 		rcu_read_unlock();
3761 		preempt_enable();
3762 	} else
3763 #endif
3764 	{
3765 		unsigned int qtail;
3766 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3767 		put_cpu();
3768 	}
3769 	return ret;
3770 }
3771 
3772 /**
3773  *	netif_rx	-	post buffer to the network code
3774  *	@skb: buffer to post
3775  *
3776  *	This function receives a packet from a device driver and queues it for
3777  *	the upper (protocol) levels to process.  It always succeeds. The buffer
3778  *	may be dropped during processing for congestion control or by the
3779  *	protocol layers.
3780  *
3781  *	return values:
3782  *	NET_RX_SUCCESS	(no congestion)
3783  *	NET_RX_DROP     (packet was dropped)
3784  *
3785  */
3786 
3787 int netif_rx(struct sk_buff *skb)
3788 {
3789 	trace_netif_rx_entry(skb);
3790 
3791 	return netif_rx_internal(skb);
3792 }
3793 EXPORT_SYMBOL(netif_rx);
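/* Illustrative sketch only (not part of this file): how a hypothetical
 * non-NAPI driver would hand a received frame to netif_rx() from its
 * interrupt handler.  foo_rx_interrupt(), foo_build_rx_skb() and the other
 * foo_* names are assumptions made for the example.
 *
 *	static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_build_rx_skb(dev); // driver specific
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		skb->protocol = eth_type_trans(skb, dev); // set type before netif_rx()
 *		netif_rx(skb);                            // queue on a per-CPU backlog
 *		return IRQ_HANDLED;
 *	}
 */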
3794 
3795 int netif_rx_ni(struct sk_buff *skb)
3796 {
3797 	int err;
3798 
3799 	trace_netif_rx_ni_entry(skb);
3800 
3801 	preempt_disable();
3802 	err = netif_rx_internal(skb);
3803 	if (local_softirq_pending())
3804 		do_softirq();
3805 	preempt_enable();
3806 
3807 	return err;
3808 }
3809 EXPORT_SYMBOL(netif_rx_ni);
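/* Illustrative sketch only: netif_rx_ni() is the variant to call from process
 * context, since it runs any pending NET_RX softirq itself instead of leaving
 * it for the next interrupt.  A tun/tap-style driver injecting a packet built
 * from a userspace write is the classic caller; the snippet is a sketch.
 *
 *	// process context, e.g. in a character-device write handler:
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx_ni(skb);
 */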
3810 
3811 static void net_tx_action(struct softirq_action *h)
3812 {
3813 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3814 
3815 	if (sd->completion_queue) {
3816 		struct sk_buff *clist;
3817 
3818 		local_irq_disable();
3819 		clist = sd->completion_queue;
3820 		sd->completion_queue = NULL;
3821 		local_irq_enable();
3822 
3823 		while (clist) {
3824 			struct sk_buff *skb = clist;
3825 			clist = clist->next;
3826 
3827 			WARN_ON(atomic_read(&skb->users));
3828 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3829 				trace_consume_skb(skb);
3830 			else
3831 				trace_kfree_skb(skb, net_tx_action);
3832 			__kfree_skb(skb);
3833 		}
3834 	}
3835 
3836 	if (sd->output_queue) {
3837 		struct Qdisc *head;
3838 
3839 		local_irq_disable();
3840 		head = sd->output_queue;
3841 		sd->output_queue = NULL;
3842 		sd->output_queue_tailp = &sd->output_queue;
3843 		local_irq_enable();
3844 
3845 		while (head) {
3846 			struct Qdisc *q = head;
3847 			spinlock_t *root_lock;
3848 
3849 			head = head->next_sched;
3850 
3851 			root_lock = qdisc_lock(q);
3852 			if (spin_trylock(root_lock)) {
3853 				smp_mb__before_atomic();
3854 				clear_bit(__QDISC_STATE_SCHED,
3855 					  &q->state);
3856 				qdisc_run(q);
3857 				spin_unlock(root_lock);
3858 			} else {
3859 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3860 					      &q->state)) {
3861 					__netif_reschedule(q);
3862 				} else {
3863 					smp_mb__before_atomic();
3864 					clear_bit(__QDISC_STATE_SCHED,
3865 						  &q->state);
3866 				}
3867 			}
3868 		}
3869 	}
3870 }
3871 
3872 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3873     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3874 /* This hook is defined here for ATM LANE */
3875 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3876 			     unsigned char *addr) __read_mostly;
3877 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3878 #endif
3879 
3880 static inline struct sk_buff *
3881 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3882 		   struct net_device *orig_dev)
3883 {
3884 #ifdef CONFIG_NET_CLS_ACT
3885 	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3886 	struct tcf_result cl_res;
3887 
3888 	/* If there's at least one ingress present somewhere (so
3889 	 * we get here via enabled static key), remaining devices
3890 	 * that are not configured with an ingress qdisc will bail
3891 	 * out here.
3892 	 */
3893 	if (!cl)
3894 		return skb;
3895 	if (*pt_prev) {
3896 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
3897 		*pt_prev = NULL;
3898 	}
3899 
3900 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3901 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3902 	qdisc_bstats_cpu_update(cl->q, skb);
3903 
3904 	switch (tc_classify(skb, cl, &cl_res, false)) {
3905 	case TC_ACT_OK:
3906 	case TC_ACT_RECLASSIFY:
3907 		skb->tc_index = TC_H_MIN(cl_res.classid);
3908 		break;
3909 	case TC_ACT_SHOT:
3910 		qdisc_qstats_cpu_drop(cl->q);
3911 	case TC_ACT_STOLEN:
3912 	case TC_ACT_QUEUED:
3913 		kfree_skb(skb);
3914 		return NULL;
3915 	case TC_ACT_REDIRECT:
3916 		/* skb_mac_header check was done by cls/act_bpf, so
3917 		 * we can safely push the L2 header back before
3918 		 * redirecting to another netdev
3919 		 */
3920 		__skb_push(skb, skb->mac_len);
3921 		skb_do_redirect(skb);
3922 		return NULL;
3923 	default:
3924 		break;
3925 	}
3926 #endif /* CONFIG_NET_CLS_ACT */
3927 	return skb;
3928 }
3929 
3930 /**
3931  *	netdev_rx_handler_register - register receive handler
3932  *	@dev: device to register a handler for
3933  *	@rx_handler: receive handler to register
3934  *	@rx_handler_data: data pointer that is used by rx handler
3935  *
3936  *	Register a receive handler for a device. This handler will then be
3937  *	called from __netif_receive_skb. A negative errno code is returned
3938  *	on a failure.
3939  *
3940  *	The caller must hold the rtnl_mutex.
3941  *
3942  *	For a general description of rx_handler, see enum rx_handler_result.
3943  */
3944 int netdev_rx_handler_register(struct net_device *dev,
3945 			       rx_handler_func_t *rx_handler,
3946 			       void *rx_handler_data)
3947 {
3948 	ASSERT_RTNL();
3949 
3950 	if (dev->rx_handler)
3951 		return -EBUSY;
3952 
3953 	/* Note: rx_handler_data must be set before rx_handler */
3954 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3955 	rcu_assign_pointer(dev->rx_handler, rx_handler);
3956 
3957 	return 0;
3958 }
3959 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
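/* Illustrative sketch only: how a bridging/bonding-style module claims a port
 * device.  foo_handle_frame and struct foo_port are hypothetical names; real
 * users of this interface include the bridge, bonding and team drivers.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		// consume, redirect or let the frame continue
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	// enslave path, rtnl_mutex already held:
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 */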
3960 
3961 /**
3962  *	netdev_rx_handler_unregister - unregister receive handler
3963  *	@dev: device to unregister a handler from
3964  *
3965  *	Unregister a receive handler from a device.
3966  *
3967  *	The caller must hold the rtnl_mutex.
3968  */
3969 void netdev_rx_handler_unregister(struct net_device *dev)
3970 {
3971 
3972 	ASSERT_RTNL();
3973 	RCU_INIT_POINTER(dev->rx_handler, NULL);
3974 	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3975 	 * section is guaranteed to see a non-NULL rx_handler_data
3976 	 * as well.
3977 	 */
3978 	synchronize_net();
3979 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3980 }
3981 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3982 
3983 /*
3984  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3985  * the special handling of PFMEMALLOC skbs.
3986  */
3987 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3988 {
3989 	switch (skb->protocol) {
3990 	case htons(ETH_P_ARP):
3991 	case htons(ETH_P_IP):
3992 	case htons(ETH_P_IPV6):
3993 	case htons(ETH_P_8021Q):
3994 	case htons(ETH_P_8021AD):
3995 		return true;
3996 	default:
3997 		return false;
3998 	}
3999 }
4000 
4001 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4002 			     int *ret, struct net_device *orig_dev)
4003 {
4004 #ifdef CONFIG_NETFILTER_INGRESS
4005 	if (nf_hook_ingress_active(skb)) {
4006 		if (*pt_prev) {
4007 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
4008 			*pt_prev = NULL;
4009 		}
4010 
4011 		return nf_hook_ingress(skb);
4012 	}
4013 #endif /* CONFIG_NETFILTER_INGRESS */
4014 	return 0;
4015 }
4016 
4017 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4018 {
4019 	struct packet_type *ptype, *pt_prev;
4020 	rx_handler_func_t *rx_handler;
4021 	struct net_device *orig_dev;
4022 	bool deliver_exact = false;
4023 	int ret = NET_RX_DROP;
4024 	__be16 type;
4025 
4026 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
4027 
4028 	trace_netif_receive_skb(skb);
4029 
4030 	orig_dev = skb->dev;
4031 
4032 	skb_reset_network_header(skb);
4033 	if (!skb_transport_header_was_set(skb))
4034 		skb_reset_transport_header(skb);
4035 	skb_reset_mac_len(skb);
4036 
4037 	pt_prev = NULL;
4038 
4039 another_round:
4040 	skb->skb_iif = skb->dev->ifindex;
4041 
4042 	__this_cpu_inc(softnet_data.processed);
4043 
4044 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4045 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4046 		skb = skb_vlan_untag(skb);
4047 		if (unlikely(!skb))
4048 			goto out;
4049 	}
4050 
4051 #ifdef CONFIG_NET_CLS_ACT
4052 	if (skb->tc_verd & TC_NCLS) {
4053 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
4054 		goto ncls;
4055 	}
4056 #endif
4057 
4058 	if (pfmemalloc)
4059 		goto skip_taps;
4060 
4061 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
4062 		if (pt_prev)
4063 			ret = deliver_skb(skb, pt_prev, orig_dev);
4064 		pt_prev = ptype;
4065 	}
4066 
4067 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4068 		if (pt_prev)
4069 			ret = deliver_skb(skb, pt_prev, orig_dev);
4070 		pt_prev = ptype;
4071 	}
4072 
4073 skip_taps:
4074 #ifdef CONFIG_NET_INGRESS
4075 	if (static_key_false(&ingress_needed)) {
4076 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4077 		if (!skb)
4078 			goto out;
4079 
4080 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4081 			goto out;
4082 	}
4083 #endif
4084 #ifdef CONFIG_NET_CLS_ACT
4085 	skb->tc_verd = 0;
4086 ncls:
4087 #endif
4088 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4089 		goto drop;
4090 
4091 	if (skb_vlan_tag_present(skb)) {
4092 		if (pt_prev) {
4093 			ret = deliver_skb(skb, pt_prev, orig_dev);
4094 			pt_prev = NULL;
4095 		}
4096 		if (vlan_do_receive(&skb))
4097 			goto another_round;
4098 		else if (unlikely(!skb))
4099 			goto out;
4100 	}
4101 
4102 	rx_handler = rcu_dereference(skb->dev->rx_handler);
4103 	if (rx_handler) {
4104 		if (pt_prev) {
4105 			ret = deliver_skb(skb, pt_prev, orig_dev);
4106 			pt_prev = NULL;
4107 		}
4108 		switch (rx_handler(&skb)) {
4109 		case RX_HANDLER_CONSUMED:
4110 			ret = NET_RX_SUCCESS;
4111 			goto out;
4112 		case RX_HANDLER_ANOTHER:
4113 			goto another_round;
4114 		case RX_HANDLER_EXACT:
4115 			deliver_exact = true;
4116 		case RX_HANDLER_PASS:
4117 			break;
4118 		default:
4119 			BUG();
4120 		}
4121 	}
4122 
4123 	if (unlikely(skb_vlan_tag_present(skb))) {
4124 		if (skb_vlan_tag_get_id(skb))
4125 			skb->pkt_type = PACKET_OTHERHOST;
4126 		/* Note: we might in the future use prio bits
4127 		 * and set skb->priority like in vlan_do_receive().
4128 		 * For the time being, just ignore the Priority Code Point.
4129 		 */
4130 		skb->vlan_tci = 0;
4131 	}
4132 
4133 	type = skb->protocol;
4134 
4135 	/* deliver only exact match when indicated */
4136 	if (likely(!deliver_exact)) {
4137 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4138 				       &ptype_base[ntohs(type) &
4139 						   PTYPE_HASH_MASK]);
4140 	}
4141 
4142 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4143 			       &orig_dev->ptype_specific);
4144 
4145 	if (unlikely(skb->dev != orig_dev)) {
4146 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4147 				       &skb->dev->ptype_specific);
4148 	}
4149 
4150 	if (pt_prev) {
4151 		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
4152 			goto drop;
4153 		else
4154 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4155 	} else {
4156 drop:
4157 		atomic_long_inc(&skb->dev->rx_dropped);
4158 		kfree_skb(skb);
4159 		/* Jamal, now you will not be able to escape explaining
4160 		 * to me how you were going to use this. :-)
4161 		 */
4162 		ret = NET_RX_DROP;
4163 	}
4164 
4165 out:
4166 	return ret;
4167 }
4168 
4169 static int __netif_receive_skb(struct sk_buff *skb)
4170 {
4171 	int ret;
4172 
4173 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4174 		unsigned long pflags = current->flags;
4175 
4176 		/*
4177 		 * PFMEMALLOC skbs are special, they should
4178 		 * - be delivered to SOCK_MEMALLOC sockets only
4179 		 * - stay away from userspace
4180 		 * - have bounded memory usage
4181 		 *
4182 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
4183 		 * context down to all allocation sites.
4184 		 */
4185 		current->flags |= PF_MEMALLOC;
4186 		ret = __netif_receive_skb_core(skb, true);
4187 		tsk_restore_flags(current, pflags, PF_MEMALLOC);
4188 	} else
4189 		ret = __netif_receive_skb_core(skb, false);
4190 
4191 	return ret;
4192 }
4193 
4194 static int netif_receive_skb_internal(struct sk_buff *skb)
4195 {
4196 	int ret;
4197 
4198 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4199 
4200 	if (skb_defer_rx_timestamp(skb))
4201 		return NET_RX_SUCCESS;
4202 
4203 	rcu_read_lock();
4204 
4205 #ifdef CONFIG_RPS
4206 	if (static_key_false(&rps_needed)) {
4207 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4208 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4209 
4210 		if (cpu >= 0) {
4211 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4212 			rcu_read_unlock();
4213 			return ret;
4214 		}
4215 	}
4216 #endif
4217 	ret = __netif_receive_skb(skb);
4218 	rcu_read_unlock();
4219 	return ret;
4220 }
4221 
4222 /**
4223  *	netif_receive_skb - process receive buffer from network
4224  *	@skb: buffer to process
4225  *
4226  *	netif_receive_skb() is the main receive data processing function.
4227  *	It always succeeds. The buffer may be dropped during processing
4228  *	for congestion control or by the protocol layers.
4229  *
4230  *	This function may only be called from softirq context and interrupts
4231  *	should be enabled.
4232  *
4233  *	Return values (usually ignored):
4234  *	NET_RX_SUCCESS: no congestion
4235  *	NET_RX_DROP: packet was dropped
4236  */
4237 int netif_receive_skb(struct sk_buff *skb)
4238 {
4239 	trace_netif_receive_skb_entry(skb);
4240 
4241 	return netif_receive_skb_internal(skb);
4242 }
4243 EXPORT_SYMBOL(netif_receive_skb);
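/* Illustrative sketch only: netif_receive_skb() is what a NAPI driver calls
 * for each completed frame from its ->poll() routine (softirq context), once
 * skb->protocol has been set.  The surrounding foo_poll() is hypothetical.
 *
 *	// inside foo_poll(napi, budget), once per received frame:
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	netif_receive_skb(skb);
 */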
4244 
4245 /* Network device is going away, flush any packets still pending
4246  * Called with irqs disabled.
4247  */
4248 static void flush_backlog(void *arg)
4249 {
4250 	struct net_device *dev = arg;
4251 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4252 	struct sk_buff *skb, *tmp;
4253 
4254 	rps_lock(sd);
4255 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4256 		if (skb->dev == dev) {
4257 			__skb_unlink(skb, &sd->input_pkt_queue);
4258 			kfree_skb(skb);
4259 			input_queue_head_incr(sd);
4260 		}
4261 	}
4262 	rps_unlock(sd);
4263 
4264 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4265 		if (skb->dev == dev) {
4266 			__skb_unlink(skb, &sd->process_queue);
4267 			kfree_skb(skb);
4268 			input_queue_head_incr(sd);
4269 		}
4270 	}
4271 }
4272 
4273 static int napi_gro_complete(struct sk_buff *skb)
4274 {
4275 	struct packet_offload *ptype;
4276 	__be16 type = skb->protocol;
4277 	struct list_head *head = &offload_base;
4278 	int err = -ENOENT;
4279 
4280 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4281 
4282 	if (NAPI_GRO_CB(skb)->count == 1) {
4283 		skb_shinfo(skb)->gso_size = 0;
4284 		goto out;
4285 	}
4286 
4287 	rcu_read_lock();
4288 	list_for_each_entry_rcu(ptype, head, list) {
4289 		if (ptype->type != type || !ptype->callbacks.gro_complete)
4290 			continue;
4291 
4292 		err = ptype->callbacks.gro_complete(skb, 0);
4293 		break;
4294 	}
4295 	rcu_read_unlock();
4296 
4297 	if (err) {
4298 		WARN_ON(&ptype->list == head);
4299 		kfree_skb(skb);
4300 		return NET_RX_SUCCESS;
4301 	}
4302 
4303 out:
4304 	return netif_receive_skb_internal(skb);
4305 }
4306 
4307 /* napi->gro_list contains packets ordered by age.
4308  * The youngest packets are at its head.
4309  * Complete skbs in reverse order to reduce latencies.
4310  */
4311 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4312 {
4313 	struct sk_buff *skb, *prev = NULL;
4314 
4315 	/* scan list and build reverse chain */
4316 	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4317 		skb->prev = prev;
4318 		prev = skb;
4319 	}
4320 
4321 	for (skb = prev; skb; skb = prev) {
4322 		skb->next = NULL;
4323 
4324 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4325 			return;
4326 
4327 		prev = skb->prev;
4328 		napi_gro_complete(skb);
4329 		napi->gro_count--;
4330 	}
4331 
4332 	napi->gro_list = NULL;
4333 }
4334 EXPORT_SYMBOL(napi_gro_flush);
4335 
4336 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4337 {
4338 	struct sk_buff *p;
4339 	unsigned int maclen = skb->dev->hard_header_len;
4340 	u32 hash = skb_get_hash_raw(skb);
4341 
4342 	for (p = napi->gro_list; p; p = p->next) {
4343 		unsigned long diffs;
4344 
4345 		NAPI_GRO_CB(p)->flush = 0;
4346 
4347 		if (hash != skb_get_hash_raw(p)) {
4348 			NAPI_GRO_CB(p)->same_flow = 0;
4349 			continue;
4350 		}
4351 
4352 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4353 		diffs |= p->vlan_tci ^ skb->vlan_tci;
4354 		if (maclen == ETH_HLEN)
4355 			diffs |= compare_ether_header(skb_mac_header(p),
4356 						      skb_mac_header(skb));
4357 		else if (!diffs)
4358 			diffs = memcmp(skb_mac_header(p),
4359 				       skb_mac_header(skb),
4360 				       maclen);
4361 		NAPI_GRO_CB(p)->same_flow = !diffs;
4362 	}
4363 }
4364 
4365 static void skb_gro_reset_offset(struct sk_buff *skb)
4366 {
4367 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
4368 	const skb_frag_t *frag0 = &pinfo->frags[0];
4369 
4370 	NAPI_GRO_CB(skb)->data_offset = 0;
4371 	NAPI_GRO_CB(skb)->frag0 = NULL;
4372 	NAPI_GRO_CB(skb)->frag0_len = 0;
4373 
4374 	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4375 	    pinfo->nr_frags &&
4376 	    !PageHighMem(skb_frag_page(frag0))) {
4377 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4378 		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
4379 	}
4380 }
4381 
4382 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4383 {
4384 	struct skb_shared_info *pinfo = skb_shinfo(skb);
4385 
4386 	BUG_ON(skb->end - skb->tail < grow);
4387 
4388 	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4389 
4390 	skb->data_len -= grow;
4391 	skb->tail += grow;
4392 
4393 	pinfo->frags[0].page_offset += grow;
4394 	skb_frag_size_sub(&pinfo->frags[0], grow);
4395 
4396 	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4397 		skb_frag_unref(skb, 0);
4398 		memmove(pinfo->frags, pinfo->frags + 1,
4399 			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
4400 	}
4401 }
4402 
4403 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4404 {
4405 	struct sk_buff **pp = NULL;
4406 	struct packet_offload *ptype;
4407 	__be16 type = skb->protocol;
4408 	struct list_head *head = &offload_base;
4409 	int same_flow;
4410 	enum gro_result ret;
4411 	int grow;
4412 
4413 	if (!(skb->dev->features & NETIF_F_GRO))
4414 		goto normal;
4415 
4416 	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4417 		goto normal;
4418 
4419 	gro_list_prepare(napi, skb);
4420 
4421 	rcu_read_lock();
4422 	list_for_each_entry_rcu(ptype, head, list) {
4423 		if (ptype->type != type || !ptype->callbacks.gro_receive)
4424 			continue;
4425 
4426 		skb_set_network_header(skb, skb_gro_offset(skb));
4427 		skb_reset_mac_len(skb);
4428 		NAPI_GRO_CB(skb)->same_flow = 0;
4429 		NAPI_GRO_CB(skb)->flush = 0;
4430 		NAPI_GRO_CB(skb)->free = 0;
4431 		NAPI_GRO_CB(skb)->udp_mark = 0;
4432 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4433 
4434 		/* Setup for GRO checksum validation */
4435 		switch (skb->ip_summed) {
4436 		case CHECKSUM_COMPLETE:
4437 			NAPI_GRO_CB(skb)->csum = skb->csum;
4438 			NAPI_GRO_CB(skb)->csum_valid = 1;
4439 			NAPI_GRO_CB(skb)->csum_cnt = 0;
4440 			break;
4441 		case CHECKSUM_UNNECESSARY:
4442 			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4443 			NAPI_GRO_CB(skb)->csum_valid = 0;
4444 			break;
4445 		default:
4446 			NAPI_GRO_CB(skb)->csum_cnt = 0;
4447 			NAPI_GRO_CB(skb)->csum_valid = 0;
4448 		}
4449 
4450 		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4451 		break;
4452 	}
4453 	rcu_read_unlock();
4454 
4455 	if (&ptype->list == head)
4456 		goto normal;
4457 
4458 	same_flow = NAPI_GRO_CB(skb)->same_flow;
4459 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4460 
4461 	if (pp) {
4462 		struct sk_buff *nskb = *pp;
4463 
4464 		*pp = nskb->next;
4465 		nskb->next = NULL;
4466 		napi_gro_complete(nskb);
4467 		napi->gro_count--;
4468 	}
4469 
4470 	if (same_flow)
4471 		goto ok;
4472 
4473 	if (NAPI_GRO_CB(skb)->flush)
4474 		goto normal;
4475 
4476 	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4477 		struct sk_buff *nskb = napi->gro_list;
4478 
4479 		/* locate the end of the list to select the 'oldest' flow */
4480 		while (nskb->next) {
4481 			pp = &nskb->next;
4482 			nskb = *pp;
4483 		}
4484 		*pp = NULL;
4485 		nskb->next = NULL;
4486 		napi_gro_complete(nskb);
4487 	} else {
4488 		napi->gro_count++;
4489 	}
4490 	NAPI_GRO_CB(skb)->count = 1;
4491 	NAPI_GRO_CB(skb)->age = jiffies;
4492 	NAPI_GRO_CB(skb)->last = skb;
4493 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4494 	skb->next = napi->gro_list;
4495 	napi->gro_list = skb;
4496 	ret = GRO_HELD;
4497 
4498 pull:
4499 	grow = skb_gro_offset(skb) - skb_headlen(skb);
4500 	if (grow > 0)
4501 		gro_pull_from_frag0(skb, grow);
4502 ok:
4503 	return ret;
4504 
4505 normal:
4506 	ret = GRO_NORMAL;
4507 	goto pull;
4508 }
4509 
4510 struct packet_offload *gro_find_receive_by_type(__be16 type)
4511 {
4512 	struct list_head *offload_head = &offload_base;
4513 	struct packet_offload *ptype;
4514 
4515 	list_for_each_entry_rcu(ptype, offload_head, list) {
4516 		if (ptype->type != type || !ptype->callbacks.gro_receive)
4517 			continue;
4518 		return ptype;
4519 	}
4520 	return NULL;
4521 }
4522 EXPORT_SYMBOL(gro_find_receive_by_type);
4523 
4524 struct packet_offload *gro_find_complete_by_type(__be16 type)
4525 {
4526 	struct list_head *offload_head = &offload_base;
4527 	struct packet_offload *ptype;
4528 
4529 	list_for_each_entry_rcu(ptype, offload_head, list) {
4530 		if (ptype->type != type || !ptype->callbacks.gro_complete)
4531 			continue;
4532 		return ptype;
4533 	}
4534 	return NULL;
4535 }
4536 EXPORT_SYMBOL(gro_find_complete_by_type);
4537 
4538 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4539 {
4540 	switch (ret) {
4541 	case GRO_NORMAL:
4542 		if (netif_receive_skb_internal(skb))
4543 			ret = GRO_DROP;
4544 		break;
4545 
4546 	case GRO_DROP:
4547 		kfree_skb(skb);
4548 		break;
4549 
4550 	case GRO_MERGED_FREE:
4551 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4552 			kmem_cache_free(skbuff_head_cache, skb);
4553 		else
4554 			__kfree_skb(skb);
4555 		break;
4556 
4557 	case GRO_HELD:
4558 	case GRO_MERGED:
4559 		break;
4560 	}
4561 
4562 	return ret;
4563 }
4564 
4565 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4566 {
4567 	skb_mark_napi_id(skb, napi);
4568 	trace_napi_gro_receive_entry(skb);
4569 
4570 	skb_gro_reset_offset(skb);
4571 
4572 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4573 }
4574 EXPORT_SYMBOL(napi_gro_receive);
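/* Illustrative sketch only: napi_gro_receive() replaces a plain
 * netif_receive_skb() call in a NAPI ->poll() loop when the driver wants GRO
 * to try to coalesce the frame first; foo_poll() is a hypothetical routine.
 *
 *	// inside foo_poll(napi, budget), once per received frame:
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */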
4575 
4576 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4577 {
4578 	if (unlikely(skb->pfmemalloc)) {
4579 		consume_skb(skb);
4580 		return;
4581 	}
4582 	__skb_pull(skb, skb_headlen(skb));
4583 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
4584 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4585 	skb->vlan_tci = 0;
4586 	skb->dev = napi->dev;
4587 	skb->skb_iif = 0;
4588 	skb->encapsulation = 0;
4589 	skb_shinfo(skb)->gso_type = 0;
4590 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4591 
4592 	napi->skb = skb;
4593 }
4594 
4595 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4596 {
4597 	struct sk_buff *skb = napi->skb;
4598 
4599 	if (!skb) {
4600 		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4601 		if (skb) {
4602 			napi->skb = skb;
4603 			skb_mark_napi_id(skb, napi);
4604 		}
4605 	}
4606 	return skb;
4607 }
4608 EXPORT_SYMBOL(napi_get_frags);
4609 
4610 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4611 				      struct sk_buff *skb,
4612 				      gro_result_t ret)
4613 {
4614 	switch (ret) {
4615 	case GRO_NORMAL:
4616 	case GRO_HELD:
4617 		__skb_push(skb, ETH_HLEN);
4618 		skb->protocol = eth_type_trans(skb, skb->dev);
4619 		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4620 			ret = GRO_DROP;
4621 		break;
4622 
4623 	case GRO_DROP:
4624 	case GRO_MERGED_FREE:
4625 		napi_reuse_skb(napi, skb);
4626 		break;
4627 
4628 	case GRO_MERGED:
4629 		break;
4630 	}
4631 
4632 	return ret;
4633 }
4634 
4635 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4636  * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4637  * we copy the ethernet header into skb->data to have a common layout.
4638  */
4639 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4640 {
4641 	struct sk_buff *skb = napi->skb;
4642 	const struct ethhdr *eth;
4643 	unsigned int hlen = sizeof(*eth);
4644 
4645 	napi->skb = NULL;
4646 
4647 	skb_reset_mac_header(skb);
4648 	skb_gro_reset_offset(skb);
4649 
4650 	eth = skb_gro_header_fast(skb, 0);
4651 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
4652 		eth = skb_gro_header_slow(skb, hlen, 0);
4653 		if (unlikely(!eth)) {
4654 			napi_reuse_skb(napi, skb);
4655 			return NULL;
4656 		}
4657 	} else {
4658 		gro_pull_from_frag0(skb, hlen);
4659 		NAPI_GRO_CB(skb)->frag0 += hlen;
4660 		NAPI_GRO_CB(skb)->frag0_len -= hlen;
4661 	}
4662 	__skb_pull(skb, hlen);
4663 
4664 	/*
4665 	 * This works because the only protocols we care about don't require
4666 	 * special handling.
4667 	 * We'll fix it up properly in napi_frags_finish()
4668 	 */
4669 	skb->protocol = eth->h_proto;
4670 
4671 	return skb;
4672 }
4673 
4674 gro_result_t napi_gro_frags(struct napi_struct *napi)
4675 {
4676 	struct sk_buff *skb = napi_frags_skb(napi);
4677 
4678 	if (!skb)
4679 		return GRO_DROP;
4680 
4681 	trace_napi_gro_frags_entry(skb);
4682 
4683 	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4684 }
4685 EXPORT_SYMBOL(napi_gro_frags);
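/* Illustrative sketch only: the frag-based GRO entry point.  A driver that
 * receives directly into page fragments asks NAPI for a shell skb with
 * napi_get_frags(), attaches its pages, and hands it back via
 * napi_gro_frags(), which parses the ethernet header itself.  The page,
 * offset, len and truesize values below are assumed driver state.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;                          // drop, no memory
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(napi);                    // consumes or recycles skb
 */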
4686 
4687 /* Compute the checksum from gro_offset and return the folded value
4688  * after adding in any pseudo checksum.
4689  */
4690 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4691 {
4692 	__wsum wsum;
4693 	__sum16 sum;
4694 
4695 	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4696 
4697 	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4698 	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4699 	if (likely(!sum)) {
4700 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4701 		    !skb->csum_complete_sw)
4702 			netdev_rx_csum_fault(skb->dev);
4703 	}
4704 
4705 	NAPI_GRO_CB(skb)->csum = wsum;
4706 	NAPI_GRO_CB(skb)->csum_valid = 1;
4707 
4708 	return sum;
4709 }
4710 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4711 
4712 /*
4713  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
4714  * Note: called with local irq disabled, but exits with local irq enabled.
4715  */
4716 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4717 {
4718 #ifdef CONFIG_RPS
4719 	struct softnet_data *remsd = sd->rps_ipi_list;
4720 
4721 	if (remsd) {
4722 		sd->rps_ipi_list = NULL;
4723 
4724 		local_irq_enable();
4725 
4726 		/* Send pending IPIs to kick RPS processing on remote CPUs. */
4727 		while (remsd) {
4728 			struct softnet_data *next = remsd->rps_ipi_next;
4729 
4730 			if (cpu_online(remsd->cpu))
4731 				smp_call_function_single_async(remsd->cpu,
4732 							   &remsd->csd);
4733 			remsd = next;
4734 		}
4735 	} else
4736 #endif
4737 		local_irq_enable();
4738 }
4739 
4740 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4741 {
4742 #ifdef CONFIG_RPS
4743 	return sd->rps_ipi_list != NULL;
4744 #else
4745 	return false;
4746 #endif
4747 }
4748 
4749 static int process_backlog(struct napi_struct *napi, int quota)
4750 {
4751 	int work = 0;
4752 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4753 
4753 	/* Check if we have pending IPIs; it's better to send them now
4754 	 * rather than waiting for net_rx_action() to end.
4756 	 */
4757 	if (sd_has_rps_ipi_waiting(sd)) {
4758 		local_irq_disable();
4759 		net_rps_action_and_irq_enable(sd);
4760 	}
4761 
4762 	napi->weight = weight_p;
4763 	local_irq_disable();
4764 	while (1) {
4765 		struct sk_buff *skb;
4766 
4767 		while ((skb = __skb_dequeue(&sd->process_queue))) {
4768 			rcu_read_lock();
4769 			local_irq_enable();
4770 			__netif_receive_skb(skb);
4771 			rcu_read_unlock();
4772 			local_irq_disable();
4773 			input_queue_head_incr(sd);
4774 			if (++work >= quota) {
4775 				local_irq_enable();
4776 				return work;
4777 			}
4778 		}
4779 
4780 		rps_lock(sd);
4781 		if (skb_queue_empty(&sd->input_pkt_queue)) {
4782 			/*
4783 			 * Inline a custom version of __napi_complete().
4784 			 * Only the current CPU owns and manipulates this napi,
4785 			 * and NAPI_STATE_SCHED is the only possible flag set
4786 			 * on backlog.
4787 			 * We can use a plain write instead of clear_bit(),
4788 			 * and we don't need an smp_mb() memory barrier.
4789 			 */
4790 			napi->state = 0;
4791 			rps_unlock(sd);
4792 
4793 			break;
4794 		}
4795 
4796 		skb_queue_splice_tail_init(&sd->input_pkt_queue,
4797 					   &sd->process_queue);
4798 		rps_unlock(sd);
4799 	}
4800 	local_irq_enable();
4801 
4802 	return work;
4803 }
4804 
4805 /**
4806  * __napi_schedule - schedule for receive
4807  * @n: entry to schedule
4808  *
4809  * The entry's receive function will be scheduled to run.
4810  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4811  */
4812 void __napi_schedule(struct napi_struct *n)
4813 {
4814 	unsigned long flags;
4815 
4816 	local_irq_save(flags);
4817 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
4818 	local_irq_restore(flags);
4819 }
4820 EXPORT_SYMBOL(__napi_schedule);
4821 
4822 /**
4823  * __napi_schedule_irqoff - schedule for receive
4824  * @n: entry to schedule
4825  *
4826  * Variant of __napi_schedule() assuming hard irqs are masked
4827  */
4828 void __napi_schedule_irqoff(struct napi_struct *n)
4829 {
4830 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
4831 }
4832 EXPORT_SYMBOL(__napi_schedule_irqoff);
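/* Illustrative sketch only: a typical hard-IRQ handler of a hypothetical NAPI
 * driver.  It masks the device's own RX interrupts and schedules the poll;
 * since hard irqs are already off here, the _irqoff variant avoids an extra
 * save/restore of flags.  foo_priv and foo_disable_rx_irqs() are assumptions.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irqs(priv);       // hypothetical helper
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */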
4833 
4834 void __napi_complete(struct napi_struct *n)
4835 {
4836 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4837 
4838 	list_del_init(&n->poll_list);
4839 	smp_mb__before_atomic();
4840 	clear_bit(NAPI_STATE_SCHED, &n->state);
4841 }
4842 EXPORT_SYMBOL(__napi_complete);
4843 
4844 void napi_complete_done(struct napi_struct *n, int work_done)
4845 {
4846 	unsigned long flags;
4847 
4848 	/*
4849 	 * don't let napi dequeue from the cpu poll list
4850 	 * just in case it's running on a different cpu
4851 	 */
4852 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4853 		return;
4854 
4855 	if (n->gro_list) {
4856 		unsigned long timeout = 0;
4857 
4858 		if (work_done)
4859 			timeout = n->dev->gro_flush_timeout;
4860 
4861 		if (timeout)
4862 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
4863 				      HRTIMER_MODE_REL_PINNED);
4864 		else
4865 			napi_gro_flush(n, false);
4866 	}
4867 	if (likely(list_empty(&n->poll_list))) {
4868 		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4869 	} else {
4870 		/* If n->poll_list is not empty, we need to mask irqs */
4871 		local_irq_save(flags);
4872 		__napi_complete(n);
4873 		local_irq_restore(flags);
4874 	}
4875 }
4876 EXPORT_SYMBOL(napi_complete_done);
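/* Illustrative sketch only: the tail of a hypothetical foo_poll() routine.
 * When the budget was not exhausted, the driver reports the amount of work
 * done (which feeds the gro_flush_timeout handling above) and re-enables its
 * RX interrupts.  foo_enable_rx_irqs() is an assumed helper.
 *
 *	if (work_done < budget) {
 *		napi_complete_done(napi, work_done);
 *		foo_enable_rx_irqs(priv);        // hypothetical helper
 *	}
 *	return work_done;
 */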
4877 
4878 /* must be called under rcu_read_lock(), as we don't take a reference */
4879 static struct napi_struct *napi_by_id(unsigned int napi_id)
4880 {
4881 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4882 	struct napi_struct *napi;
4883 
4884 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4885 		if (napi->napi_id == napi_id)
4886 			return napi;
4887 
4888 	return NULL;
4889 }
4890 
4891 #if defined(CONFIG_NET_RX_BUSY_POLL)
4892 #define BUSY_POLL_BUDGET 8
4893 bool sk_busy_loop(struct sock *sk, int nonblock)
4894 {
4895 	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
4896 	int (*busy_poll)(struct napi_struct *dev);
4897 	struct napi_struct *napi;
4898 	int rc = false;
4899 
4900 	rcu_read_lock();
4901 
4902 	napi = napi_by_id(sk->sk_napi_id);
4903 	if (!napi)
4904 		goto out;
4905 
4906 	/* Note: ndo_busy_poll method is optional in linux-4.5 */
4907 	busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
4908 
4909 	do {
4910 		rc = 0;
4911 		local_bh_disable();
4912 		if (busy_poll) {
4913 			rc = busy_poll(napi);
4914 		} else if (napi_schedule_prep(napi)) {
4915 			void *have = netpoll_poll_lock(napi);
4916 
4917 			if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4918 				rc = napi->poll(napi, BUSY_POLL_BUDGET);
4919 				trace_napi_poll(napi);
4920 				if (rc == BUSY_POLL_BUDGET) {
4921 					napi_complete_done(napi, rc);
4922 					napi_schedule(napi);
4923 				}
4924 			}
4925 			netpoll_poll_unlock(have);
4926 		}
4927 		if (rc > 0)
4928 			NET_ADD_STATS_BH(sock_net(sk),
4929 					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4930 		local_bh_enable();
4931 
4932 		if (rc == LL_FLUSH_FAILED)
4933 			break; /* permanent failure */
4934 
4935 		cpu_relax();
4936 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4937 		 !need_resched() && !busy_loop_timeout(end_time));
4938 
4939 	rc = !skb_queue_empty(&sk->sk_receive_queue);
4940 out:
4941 	rcu_read_unlock();
4942 	return rc;
4943 }
4944 EXPORT_SYMBOL(sk_busy_loop);
4945 
4946 #endif /* CONFIG_NET_RX_BUSY_POLL */
4947 
4948 void napi_hash_add(struct napi_struct *napi)
4949 {
4950 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4951 	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
4952 		return;
4953 
4954 	spin_lock(&napi_hash_lock);
4955 
4956 	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4957 	do {
4958 		if (unlikely(++napi_gen_id < NR_CPUS + 1))
4959 			napi_gen_id = NR_CPUS + 1;
4960 	} while (napi_by_id(napi_gen_id));
4961 	napi->napi_id = napi_gen_id;
4962 
4963 	hlist_add_head_rcu(&napi->napi_hash_node,
4964 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4965 
4966 	spin_unlock(&napi_hash_lock);
4967 }
4968 EXPORT_SYMBOL_GPL(napi_hash_add);
4969 
4970 /* Warning: the caller is responsible for making sure an RCU grace period
4971  * is respected before freeing the memory containing @napi
4972  */
4973 bool napi_hash_del(struct napi_struct *napi)
4974 {
4975 	bool rcu_sync_needed = false;
4976 
4977 	spin_lock(&napi_hash_lock);
4978 
4979 	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4980 		rcu_sync_needed = true;
4981 		hlist_del_rcu(&napi->napi_hash_node);
4982 	}
4983 	spin_unlock(&napi_hash_lock);
4984 	return rcu_sync_needed;
4985 }
4986 EXPORT_SYMBOL_GPL(napi_hash_del);
4987 
4988 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4989 {
4990 	struct napi_struct *napi;
4991 
4992 	napi = container_of(timer, struct napi_struct, timer);
4993 	if (napi->gro_list)
4994 		napi_schedule(napi);
4995 
4996 	return HRTIMER_NORESTART;
4997 }
4998 
4999 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5000 		    int (*poll)(struct napi_struct *, int), int weight)
5001 {
5002 	INIT_LIST_HEAD(&napi->poll_list);
5003 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5004 	napi->timer.function = napi_watchdog;
5005 	napi->gro_count = 0;
5006 	napi->gro_list = NULL;
5007 	napi->skb = NULL;
5008 	napi->poll = poll;
5009 	if (weight > NAPI_POLL_WEIGHT)
5010 		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5011 			    weight, dev->name);
5012 	napi->weight = weight;
5013 	list_add(&napi->dev_list, &dev->napi_list);
5014 	napi->dev = dev;
5015 #ifdef CONFIG_NETPOLL
5016 	spin_lock_init(&napi->poll_lock);
5017 	napi->poll_owner = -1;
5018 #endif
5019 	set_bit(NAPI_STATE_SCHED, &napi->state);
5020 	napi_hash_add(napi);
5021 }
5022 EXPORT_SYMBOL(netif_napi_add);
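/* Illustrative sketch only: NAPI registration in a hypothetical driver's
 * probe path.  NAPI_POLL_WEIGHT is the recommended weight; napi_enable()
 * must still be called (typically from ndo_open) before the first interrupt
 * can usefully schedule the poll.  foo_poll() and priv are assumptions.
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	// in foo_open():
 *	napi_enable(&priv->napi);
 */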
5023 
5024 void napi_disable(struct napi_struct *n)
5025 {
5026 	might_sleep();
5027 	set_bit(NAPI_STATE_DISABLE, &n->state);
5028 
5029 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5030 		msleep(1);
5031 	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5032 		msleep(1);
5033 
5034 	hrtimer_cancel(&n->timer);
5035 
5036 	clear_bit(NAPI_STATE_DISABLE, &n->state);
5037 }
5038 EXPORT_SYMBOL(napi_disable);
5039 
5040 /* Must be called in process context */
5041 void netif_napi_del(struct napi_struct *napi)
5042 {
5043 	might_sleep();
5044 	if (napi_hash_del(napi))
5045 		synchronize_net();
5046 	list_del_init(&napi->dev_list);
5047 	napi_free_frags(napi);
5048 
5049 	kfree_skb_list(napi->gro_list);
5050 	napi->gro_list = NULL;
5051 	napi->gro_count = 0;
5052 }
5053 EXPORT_SYMBOL(netif_napi_del);
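/* Illustrative sketch only: teardown order in a hypothetical driver.  The
 * instance is first disabled (so no poll can be in flight), then removed;
 * netif_napi_del() itself handles the RCU grace period via napi_hash_del().
 *
 *	// in foo_stop():
 *	napi_disable(&priv->napi);
 *	// in foo_remove(), before free_netdev(dev):
 *	netif_napi_del(&priv->napi);
 */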
5054 
5055 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5056 {
5057 	void *have;
5058 	int work, weight;
5059 
5060 	list_del_init(&n->poll_list);
5061 
5062 	have = netpoll_poll_lock(n);
5063 
5064 	weight = n->weight;
5065 
5066 	/* This NAPI_STATE_SCHED test is for avoiding a race
5067 	 * with netpoll's poll_napi().  Only the entity which
5068 	 * obtains the lock and sees NAPI_STATE_SCHED set will
5069 	 * actually make the ->poll() call.  Therefore we avoid
5070 	 * accidentally calling ->poll() when NAPI is not scheduled.
5071 	 */
5072 	work = 0;
5073 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5074 		work = n->poll(n, weight);
5075 		trace_napi_poll(n);
5076 	}
5077 
5078 	WARN_ON_ONCE(work > weight);
5079 
5080 	if (likely(work < weight))
5081 		goto out_unlock;
5082 
5083 	/* Drivers must not modify the NAPI state if they
5084 	 * consume the entire weight.  In such cases this code
5085 	 * still "owns" the NAPI instance and therefore can
5086 	 * move the instance around on the list at-will.
5087 	 */
5088 	if (unlikely(napi_disable_pending(n))) {
5089 		napi_complete(n);
5090 		goto out_unlock;
5091 	}
5092 
5093 	if (n->gro_list) {
5094 		/* Flush too-old packets.
5095 		 * If HZ < 1000, flush all packets.
5096 		 */
5097 		napi_gro_flush(n, HZ >= 1000);
5098 	}
5099 
5100 	/* Some drivers may have called napi_schedule
5101 	 * prior to exhausting their budget.
5102 	 */
5103 	if (unlikely(!list_empty(&n->poll_list))) {
5104 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5105 			     n->dev ? n->dev->name : "backlog");
5106 		goto out_unlock;
5107 	}
5108 
5109 	list_add_tail(&n->poll_list, repoll);
5110 
5111 out_unlock:
5112 	netpoll_poll_unlock(have);
5113 
5114 	return work;
5115 }
5116 
5117 static void net_rx_action(struct softirq_action *h)
5118 {
5119 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5120 	unsigned long time_limit = jiffies + 2;
5121 	int budget = netdev_budget;
5122 	LIST_HEAD(list);
5123 	LIST_HEAD(repoll);
5124 
5125 	local_irq_disable();
5126 	list_splice_init(&sd->poll_list, &list);
5127 	local_irq_enable();
5128 
5129 	for (;;) {
5130 		struct napi_struct *n;
5131 
5132 		if (list_empty(&list)) {
5133 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5134 				return;
5135 			break;
5136 		}
5137 
5138 		n = list_first_entry(&list, struct napi_struct, poll_list);
5139 		budget -= napi_poll(n, &repoll);
5140 
5141 		/* If the softirq window is exhausted then punt.
5142 		 * Allow this to run for 2 jiffies, which allows
5143 		 * an average latency of 1.5/HZ.
5144 		 */
5145 		if (unlikely(budget <= 0 ||
5146 			     time_after_eq(jiffies, time_limit))) {
5147 			sd->time_squeeze++;
5148 			break;
5149 		}
5150 	}
5151 
5152 	local_irq_disable();
5153 
5154 	list_splice_tail_init(&sd->poll_list, &list);
5155 	list_splice_tail(&repoll, &list);
5156 	list_splice(&list, &sd->poll_list);
5157 	if (!list_empty(&sd->poll_list))
5158 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
5159 
5160 	net_rps_action_and_irq_enable(sd);
5161 }
5162 
5163 struct netdev_adjacent {
5164 	struct net_device *dev;
5165 
5166 	/* upper master flag, there can only be one master device per list */
5167 	bool master;
5168 
5169 	/* counter for the number of times this device was added to us */
5170 	u16 ref_nr;
5171 
5172 	/* private field for the users */
5173 	void *private;
5174 
5175 	struct list_head list;
5176 	struct rcu_head rcu;
5177 };
5178 
5179 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
5180 						 struct list_head *adj_list)
5181 {
5182 	struct netdev_adjacent *adj;
5183 
5184 	list_for_each_entry(adj, adj_list, list) {
5185 		if (adj->dev == adj_dev)
5186 			return adj;
5187 	}
5188 	return NULL;
5189 }
5190 
5191 /**
5192  * netdev_has_upper_dev - Check if device is linked to an upper device
5193  * @dev: device
5194  * @upper_dev: upper device to check
5195  *
5196  * Find out if a device is linked to the specified upper device and return true
5197  * in case it is. Note that this checks only the immediate upper device,
5198  * not the complete stack of devices. The caller must hold the RTNL lock.
5199  */
5200 bool netdev_has_upper_dev(struct net_device *dev,
5201 			  struct net_device *upper_dev)
5202 {
5203 	ASSERT_RTNL();
5204 
5205 	return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
5206 }
5207 EXPORT_SYMBOL(netdev_has_upper_dev);
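/* Illustrative sketch only: a hypothetical caller checking, under RTNL,
 * whether a port is already linked to a given master before linking it.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(slave_dev, master_dev))
 *		return -EEXIST;
 */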
5208 
5209 /**
5210  * netdev_has_any_upper_dev - Check if device is linked to some device
5211  * @dev: device
5212  *
5213  * Find out if a device is linked to an upper device and return true in case
5214  * it is. The caller must hold the RTNL lock.
5215  */
5216 static bool netdev_has_any_upper_dev(struct net_device *dev)
5217 {
5218 	ASSERT_RTNL();
5219 
5220 	return !list_empty(&dev->all_adj_list.upper);
5221 }
5222 
5223 /**
5224  * netdev_master_upper_dev_get - Get master upper device
5225  * @dev: device
5226  *
5227  * Find a master upper device and return pointer to it or NULL in case
5228  * it's not there. The caller must hold the RTNL lock.
5229  */
5230 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5231 {
5232 	struct netdev_adjacent *upper;
5233 
5234 	ASSERT_RTNL();
5235 
5236 	if (list_empty(&dev->adj_list.upper))
5237 		return NULL;
5238 
5239 	upper = list_first_entry(&dev->adj_list.upper,
5240 				 struct netdev_adjacent, list);
5241 	if (likely(upper->master))
5242 		return upper->dev;
5243 	return NULL;
5244 }
5245 EXPORT_SYMBOL(netdev_master_upper_dev_get);
5246 
5247 void *netdev_adjacent_get_private(struct list_head *adj_list)
5248 {
5249 	struct netdev_adjacent *adj;
5250 
5251 	adj = list_entry(adj_list, struct netdev_adjacent, list);
5252 
5253 	return adj->private;
5254 }
5255 EXPORT_SYMBOL(netdev_adjacent_get_private);
5256 
5257 /**
5258  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5259  * @dev: device
5260  * @iter: list_head ** of the current position
5261  *
5262  * Gets the next device from the dev's upper list, starting from iter
5263  * position. The caller must hold RCU read lock.
5264  */
5265 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5266 						 struct list_head **iter)
5267 {
5268 	struct netdev_adjacent *upper;
5269 
5270 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5271 
5272 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5273 
5274 	if (&upper->list == &dev->adj_list.upper)
5275 		return NULL;
5276 
5277 	*iter = &upper->list;
5278 
5279 	return upper->dev;
5280 }
5281 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
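/* Illustrative sketch only: walking the immediate upper devices under RCU
 * with this iterator.  The iter pointer starts at the list head itself and is
 * advanced by each call.
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
 *		pr_info("%s is an upper dev of %s\n", upper->name, dev->name);
 *	rcu_read_unlock();
 */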
5282 
5283 /**
5284  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
5285  * @dev: device
5286  * @iter: list_head ** of the current position
5287  *
5288  * Gets the next device from the dev's upper list, starting from iter
5289  * position. The caller must hold RCU read lock.
5290  */
5291 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5292 						     struct list_head **iter)
5293 {
5294 	struct netdev_adjacent *upper;
5295 
5296 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5297 
5298 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5299 
5300 	if (&upper->list == &dev->all_adj_list.upper)
5301 		return NULL;
5302 
5303 	*iter = &upper->list;
5304 
5305 	return upper->dev;
5306 }
5307 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
5308 
5309 /**
5310  * netdev_lower_get_next_private - Get the next ->private from the
5311  *				   lower neighbour list
5312  * @dev: device
5313  * @iter: list_head ** of the current position
5314  *
5315  * Gets the next netdev_adjacent->private from the dev's lower neighbour
5316  * list, starting from iter position. The caller must either hold the
5317  * RTNL lock or its own locking that guarantees that the neighbour lower
5318  * list will remain unchanged.
5319  */
5320 void *netdev_lower_get_next_private(struct net_device *dev,
5321 				    struct list_head **iter)
5322 {
5323 	struct netdev_adjacent *lower;
5324 
5325 	lower = list_entry(*iter, struct netdev_adjacent, list);
5326 
5327 	if (&lower->list == &dev->adj_list.lower)
5328 		return NULL;
5329 
5330 	*iter = lower->list.next;
5331 
5332 	return lower->private;
5333 }
5334 EXPORT_SYMBOL(netdev_lower_get_next_private);
5335 
5336 /**
5337  * netdev_lower_get_next_private_rcu - Get the next ->private from the
5338  *				       lower neighbour list, RCU
5339  *				       variant
5340  * @dev: device
5341  * @iter: list_head ** of the current position
5342  *
5343  * Gets the next netdev_adjacent->private from the dev's lower neighbour
5344  * list, starting from iter position. The caller must hold RCU read lock.
5345  */
5346 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5347 					struct list_head **iter)
5348 {
5349 	struct netdev_adjacent *lower;
5350 
5351 	WARN_ON_ONCE(!rcu_read_lock_held());
5352 
5353 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5354 
5355 	if (&lower->list == &dev->adj_list.lower)
5356 		return NULL;
5357 
5358 	*iter = &lower->list;
5359 
5360 	return lower->private;
5361 }
5362 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5363 
5364 /**
5365  * netdev_lower_get_next - Get the next device from the lower neighbour
5366  *                         list
5367  * @dev: device
5368  * @iter: list_head ** of the current position
5369  *
5370  * Gets the next netdev_adjacent from the dev's lower neighbour
5371  * list, starting from iter position. The caller must hold RTNL lock or
5372  * its own locking that guarantees that the neighbour lower
5373  * list will remain unchanged.
5374  */
5375 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5376 {
5377 	struct netdev_adjacent *lower;
5378 
5379 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5380 
5381 	if (&lower->list == &dev->adj_list.lower)
5382 		return NULL;
5383 
5384 	*iter = &lower->list;
5385 
5386 	return lower->dev;
5387 }
5388 EXPORT_SYMBOL(netdev_lower_get_next);
5389 
5390 /**
5391  * netdev_lower_get_first_private_rcu - Get the first ->private from the
5392  *				       lower neighbour list, RCU
5393  *				       variant
5394  * @dev: device
5395  *
5396  * Gets the first netdev_adjacent->private from the dev's lower neighbour
5397  * list. The caller must hold RCU read lock.
5398  */
5399 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5400 {
5401 	struct netdev_adjacent *lower;
5402 
5403 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
5404 			struct netdev_adjacent, list);
5405 	if (lower)
5406 		return lower->private;
5407 	return NULL;
5408 }
5409 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5410 
5411 /**
5412  * netdev_master_upper_dev_get_rcu - Get master upper device
5413  * @dev: device
5414  *
5415  * Find a master upper device and return pointer to it or NULL in case
5416  * it's not there. The caller must hold the RCU read lock.
5417  */
5418 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5419 {
5420 	struct netdev_adjacent *upper;
5421 
5422 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
5423 				       struct netdev_adjacent, list);
5424 	if (upper && likely(upper->master))
5425 		return upper->dev;
5426 	return NULL;
5427 }
5428 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5429 
5430 static int netdev_adjacent_sysfs_add(struct net_device *dev,
5431 			      struct net_device *adj_dev,
5432 			      struct list_head *dev_list)
5433 {
5434 	char linkname[IFNAMSIZ+7];
5435 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
5436 		"upper_%s" : "lower_%s", adj_dev->name);
5437 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5438 				 linkname);
5439 }
5440 static void netdev_adjacent_sysfs_del(struct net_device *dev,
5441 			       char *name,
5442 			       struct list_head *dev_list)
5443 {
5444 	char linkname[IFNAMSIZ+7];
5445 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
5446 		"upper_%s" : "lower_%s", name);
5447 	sysfs_remove_link(&(dev->dev.kobj), linkname);
5448 }
5449 
5450 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5451 						 struct net_device *adj_dev,
5452 						 struct list_head *dev_list)
5453 {
5454 	return (dev_list == &dev->adj_list.upper ||
5455 		dev_list == &dev->adj_list.lower) &&
5456 		net_eq(dev_net(dev), dev_net(adj_dev));
5457 }
5458 
5459 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5460 					struct net_device *adj_dev,
5461 					struct list_head *dev_list,
5462 					void *private, bool master)
5463 {
5464 	struct netdev_adjacent *adj;
5465 	int ret;
5466 
5467 	adj = __netdev_find_adj(adj_dev, dev_list);
5468 
5469 	if (adj) {
5470 		adj->ref_nr++;
5471 		return 0;
5472 	}
5473 
5474 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5475 	if (!adj)
5476 		return -ENOMEM;
5477 
5478 	adj->dev = adj_dev;
5479 	adj->master = master;
5480 	adj->ref_nr = 1;
5481 	adj->private = private;
5482 	dev_hold(adj_dev);
5483 
5484 	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5485 		 adj_dev->name, dev->name, adj_dev->name);
5486 
5487 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5488 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5489 		if (ret)
5490 			goto free_adj;
5491 	}
5492 
5493 	/* Ensure that master link is always the first item in list. */
5494 	if (master) {
5495 		ret = sysfs_create_link(&(dev->dev.kobj),
5496 					&(adj_dev->dev.kobj), "master");
5497 		if (ret)
5498 			goto remove_symlinks;
5499 
5500 		list_add_rcu(&adj->list, dev_list);
5501 	} else {
5502 		list_add_tail_rcu(&adj->list, dev_list);
5503 	}
5504 
5505 	return 0;
5506 
5507 remove_symlinks:
5508 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5509 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5510 free_adj:
5511 	kfree(adj);
5512 	dev_put(adj_dev);
5513 
5514 	return ret;
5515 }
5516 
5517 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5518 					 struct net_device *adj_dev,
5519 					 struct list_head *dev_list)
5520 {
5521 	struct netdev_adjacent *adj;
5522 
5523 	adj = __netdev_find_adj(adj_dev, dev_list);
5524 
5525 	if (!adj) {
5526 		pr_err("tried to remove device %s from %s\n",
5527 		       dev->name, adj_dev->name);
5528 		BUG();
5529 	}
5530 
5531 	if (adj->ref_nr > 1) {
5532 		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5533 			 adj->ref_nr-1);
5534 		adj->ref_nr--;
5535 		return;
5536 	}
5537 
5538 	if (adj->master)
5539 		sysfs_remove_link(&(dev->dev.kobj), "master");
5540 
5541 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5542 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5543 
5544 	list_del_rcu(&adj->list);
5545 	pr_debug("dev_put for %s, because link removed from %s to %s\n",
5546 		 adj_dev->name, dev->name, adj_dev->name);
5547 	dev_put(adj_dev);
5548 	kfree_rcu(adj, rcu);
5549 }
5550 
5551 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5552 					    struct net_device *upper_dev,
5553 					    struct list_head *up_list,
5554 					    struct list_head *down_list,
5555 					    void *private, bool master)
5556 {
5557 	int ret;
5558 
5559 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5560 					   master);
5561 	if (ret)
5562 		return ret;
5563 
5564 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5565 					   false);
5566 	if (ret) {
5567 		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5568 		return ret;
5569 	}
5570 
5571 	return 0;
5572 }
5573 
5574 static int __netdev_adjacent_dev_link(struct net_device *dev,
5575 				      struct net_device *upper_dev)
5576 {
5577 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5578 						&dev->all_adj_list.upper,
5579 						&upper_dev->all_adj_list.lower,
5580 						NULL, false);
5581 }
5582 
5583 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5584 					       struct net_device *upper_dev,
5585 					       struct list_head *up_list,
5586 					       struct list_head *down_list)
5587 {
5588 	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5589 	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5590 }
5591 
5592 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5593 					 struct net_device *upper_dev)
5594 {
5595 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5596 					   &dev->all_adj_list.upper,
5597 					   &upper_dev->all_adj_list.lower);
5598 }
5599 
5600 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5601 						struct net_device *upper_dev,
5602 						void *private, bool master)
5603 {
5604 	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5605 
5606 	if (ret)
5607 		return ret;
5608 
5609 	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5610 					       &dev->adj_list.upper,
5611 					       &upper_dev->adj_list.lower,
5612 					       private, master);
5613 	if (ret) {
5614 		__netdev_adjacent_dev_unlink(dev, upper_dev);
5615 		return ret;
5616 	}
5617 
5618 	return 0;
5619 }
5620 
5621 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5622 						   struct net_device *upper_dev)
5623 {
5624 	__netdev_adjacent_dev_unlink(dev, upper_dev);
5625 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5626 					   &dev->adj_list.upper,
5627 					   &upper_dev->adj_list.lower);
5628 }
5629 
5630 static int __netdev_upper_dev_link(struct net_device *dev,
5631 				   struct net_device *upper_dev, bool master,
5632 				   void *upper_priv, void *upper_info)
5633 {
5634 	struct netdev_notifier_changeupper_info changeupper_info;
5635 	struct netdev_adjacent *i, *j, *to_i, *to_j;
5636 	int ret = 0;
5637 
5638 	ASSERT_RTNL();
5639 
5640 	if (dev == upper_dev)
5641 		return -EBUSY;
5642 
5643 	/* To prevent loops, check that dev is not already an upper device of upper_dev. */
5644 	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
5645 		return -EBUSY;
5646 
5647 	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
5648 		return -EEXIST;
5649 
5650 	if (master && netdev_master_upper_dev_get(dev))
5651 		return -EBUSY;
5652 
5653 	changeupper_info.upper_dev = upper_dev;
5654 	changeupper_info.master = master;
5655 	changeupper_info.linking = true;
5656 	changeupper_info.upper_info = upper_info;
5657 
5658 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5659 					    &changeupper_info.info);
5660 	ret = notifier_to_errno(ret);
5661 	if (ret)
5662 		return ret;
5663 
5664 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
5665 						   master);
5666 	if (ret)
5667 		return ret;
5668 
5669 	/* Now that we linked these devs, make all the upper_dev's
5670 	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
5671 	 * vice versa, and don't forget the devices themselves. All of these
5672 	 * links are non-neighbours.
5673 	 */
5674 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5675 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5676 			pr_debug("Interlinking %s with %s, non-neighbour\n",
5677 				 i->dev->name, j->dev->name);
5678 			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5679 			if (ret)
5680 				goto rollback_mesh;
5681 		}
5682 	}
5683 
5684 	/* add dev to every upper_dev's upper device */
5685 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5686 		pr_debug("linking %s's upper device %s with %s\n",
5687 			 upper_dev->name, i->dev->name, dev->name);
5688 		ret = __netdev_adjacent_dev_link(dev, i->dev);
5689 		if (ret)
5690 			goto rollback_upper_mesh;
5691 	}
5692 
5693 	/* add upper_dev to every dev's lower device */
5694 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5695 		pr_debug("linking %s's lower device %s with %s\n", dev->name,
5696 			 i->dev->name, upper_dev->name);
5697 		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5698 		if (ret)
5699 			goto rollback_lower_mesh;
5700 	}
5701 
5702 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5703 					    &changeupper_info.info);
5704 	ret = notifier_to_errno(ret);
5705 	if (ret)
5706 		goto rollback_lower_mesh;
5707 
5708 	return 0;
5709 
5710 rollback_lower_mesh:
5711 	to_i = i;
5712 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5713 		if (i == to_i)
5714 			break;
5715 		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
5716 	}
5717 
5718 	i = NULL;
5719 
5720 rollback_upper_mesh:
5721 	to_i = i;
5722 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5723 		if (i == to_i)
5724 			break;
5725 		__netdev_adjacent_dev_unlink(dev, i->dev);
5726 	}
5727 
5728 	i = j = NULL;
5729 
5730 rollback_mesh:
5731 	to_i = i;
5732 	to_j = j;
5733 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5734 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5735 			if (i == to_i && j == to_j)
5736 				break;
5737 			__netdev_adjacent_dev_unlink(i->dev, j->dev);
5738 		}
5739 		if (i == to_i)
5740 			break;
5741 	}
5742 
5743 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5744 
5745 	return ret;
5746 }
5747 
5748 /**
5749  * netdev_upper_dev_link - Add a link to the upper device
5750  * @dev: device
5751  * @upper_dev: new upper device
5752  *
5753  * Adds a link to a device which is upper to this one. The caller must hold
5754  * the RTNL lock. On a failure a negative errno code is returned.
5755  * On success the reference counts are adjusted and the function
5756  * returns zero.
5757  */
5758 int netdev_upper_dev_link(struct net_device *dev,
5759 			  struct net_device *upper_dev)
5760 {
5761 	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
5762 }
5763 EXPORT_SYMBOL(netdev_upper_dev_link);
5764 
5765 /**
5766  * netdev_master_upper_dev_link - Add a master link to the upper device
5767  * @dev: device
5768  * @upper_dev: new upper device
5769  * @upper_priv: upper device private
5770  * @upper_info: upper info to be passed down via notifier
5771  *
5772  * Adds a link to a device which is upper to this one. In this case, only
5773  * one master upper device can be linked, although other non-master devices
5774  * might be linked as well. The caller must hold the RTNL lock.
5775  * On a failure a negative errno code is returned. On success the reference
5776  * counts are adjusted and the function returns zero.
5777  */
5778 int netdev_master_upper_dev_link(struct net_device *dev,
5779 				 struct net_device *upper_dev,
5780 				 void *upper_priv, void *upper_info)
5781 {
5782 	return __netdev_upper_dev_link(dev, upper_dev, true,
5783 				       upper_priv, upper_info);
5784 }
5785 EXPORT_SYMBOL(netdev_master_upper_dev_link);
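/*
 * Illustrative sketch (not part of the original file): how a bond- or
 * team-like driver might attach and detach a slave with the helpers above.
 * example_enslave()/example_release() and the slave_priv pointer are
 * assumptions made up for this example; RTNL must be held as the
 * kernel-doc requires.
 */
static int example_enslave(struct net_device *master, struct net_device *slave,
			   void *slave_priv)
{
	ASSERT_RTNL();

	/* Only one master may be linked; -EBUSY is returned otherwise. */
	return netdev_master_upper_dev_link(slave, master, slave_priv, NULL);
}

static void example_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	netdev_upper_dev_unlink(slave, master);
}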
5786 
5787 /**
5788  * netdev_upper_dev_unlink - Removes a link to upper device
5789  * @dev: device
5790  * @upper_dev: upper device to unlink
5791  *
5792  * Removes a link to a device which is upper to this one. The caller must hold
5793  * the RTNL lock.
5794  */
5795 void netdev_upper_dev_unlink(struct net_device *dev,
5796 			     struct net_device *upper_dev)
5797 {
5798 	struct netdev_notifier_changeupper_info changeupper_info;
5799 	struct netdev_adjacent *i, *j;
5800 	ASSERT_RTNL();
5801 
5802 	changeupper_info.upper_dev = upper_dev;
5803 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
5804 	changeupper_info.linking = false;
5805 
5806 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5807 				      &changeupper_info.info);
5808 
5809 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5810 
5811 	/* Here is the tricky part. We must remove all dev's lower
5812 	 * devices from all upper_dev's upper devices and vice
5813 	 * versa, to maintain the graph relationship.
5814 	 */
5815 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
5816 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5817 			__netdev_adjacent_dev_unlink(i->dev, j->dev);
5818 
5819 	/* also remove the devices themselves from the lower/upper device
5820 	 * lists
5821 	 */
5822 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
5823 		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
5824 
5825 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5826 		__netdev_adjacent_dev_unlink(dev, i->dev);
5827 
5828 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5829 				      &changeupper_info.info);
5830 }
5831 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5832 
5833 /**
5834  * netdev_bonding_info_change - Dispatch event about slave change
5835  * @dev: device
5836  * @bonding_info: info to dispatch
5837  *
5838  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5839  * The caller must hold the RTNL lock.
5840  */
5841 void netdev_bonding_info_change(struct net_device *dev,
5842 				struct netdev_bonding_info *bonding_info)
5843 {
5844 	struct netdev_notifier_bonding_info	info;
5845 
5846 	memcpy(&info.bonding_info, bonding_info,
5847 	       sizeof(struct netdev_bonding_info));
5848 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5849 				      &info.info);
5850 }
5851 EXPORT_SYMBOL(netdev_bonding_info_change);
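/*
 * Illustrative sketch (not from the original source): a minimal way a
 * bonding-style driver could announce a slave change.  A real driver fills
 * the structure from its own slave/master state; here it is simply zeroed,
 * and example_notify_slave() is a made-up name.
 */
static void example_notify_slave(struct net_device *slave_dev)
{
	struct netdev_bonding_info binfo;

	memset(&binfo, 0, sizeof(binfo));	/* real code fills slave/master info */
	netdev_bonding_info_change(slave_dev, &binfo);	/* RTNL held by caller */
}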
5852 
5853 static void netdev_adjacent_add_links(struct net_device *dev)
5854 {
5855 	struct netdev_adjacent *iter;
5856 
5857 	struct net *net = dev_net(dev);
5858 
5859 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
5860 		if (!net_eq(net, dev_net(iter->dev)))
5861 			continue;
5862 		netdev_adjacent_sysfs_add(iter->dev, dev,
5863 					  &iter->dev->adj_list.lower);
5864 		netdev_adjacent_sysfs_add(dev, iter->dev,
5865 					  &dev->adj_list.upper);
5866 	}
5867 
5868 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
5869 		if (!net_eq(net, dev_net(iter->dev)))
5870 			continue;
5871 		netdev_adjacent_sysfs_add(iter->dev, dev,
5872 					  &iter->dev->adj_list.upper);
5873 		netdev_adjacent_sysfs_add(dev, iter->dev,
5874 					  &dev->adj_list.lower);
5875 	}
5876 }
5877 
5878 static void netdev_adjacent_del_links(struct net_device *dev)
5879 {
5880 	struct netdev_adjacent *iter;
5881 
5882 	struct net *net = dev_net(dev);
5883 
5884 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
5885 		if (!net_eq(net, dev_net(iter->dev)))
5886 			continue;
5887 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
5888 					  &iter->dev->adj_list.lower);
5889 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
5890 					  &dev->adj_list.upper);
5891 	}
5892 
5893 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
5894 		if (!net_eq(net, dev_net(iter->dev)))
5895 			continue;
5896 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
5897 					  &iter->dev->adj_list.upper);
5898 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
5899 					  &dev->adj_list.lower);
5900 	}
5901 }
5902 
5903 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5904 {
5905 	struct netdev_adjacent *iter;
5906 
5907 	struct net *net = dev_net(dev);
5908 
5909 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
5910 		if (!net_eq(net, dev_net(iter->dev)))
5911 			continue;
5912 		netdev_adjacent_sysfs_del(iter->dev, oldname,
5913 					  &iter->dev->adj_list.lower);
5914 		netdev_adjacent_sysfs_add(iter->dev, dev,
5915 					  &iter->dev->adj_list.lower);
5916 	}
5917 
5918 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
5919 		if (!net_eq(net, dev_net(iter->dev)))
5920 			continue;
5921 		netdev_adjacent_sysfs_del(iter->dev, oldname,
5922 					  &iter->dev->adj_list.upper);
5923 		netdev_adjacent_sysfs_add(iter->dev, dev,
5924 					  &iter->dev->adj_list.upper);
5925 	}
5926 }
5927 
5928 void *netdev_lower_dev_get_private(struct net_device *dev,
5929 				   struct net_device *lower_dev)
5930 {
5931 	struct netdev_adjacent *lower;
5932 
5933 	if (!lower_dev)
5934 		return NULL;
5935 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
5936 	if (!lower)
5937 		return NULL;
5938 
5939 	return lower->private;
5940 }
5941 EXPORT_SYMBOL(netdev_lower_dev_get_private);
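/*
 * Illustrative sketch (assumption, not original code): retrieving the private
 * pointer that a master stored when it linked a lower device (the upper_priv
 * argument of netdev_master_upper_dev_link() above).  struct example_slave
 * and example_slave_of() are hypothetical names.
 */
struct example_slave {
	int id;
};

static struct example_slave *example_slave_of(struct net_device *master,
					      struct net_device *slave)
{
	return netdev_lower_dev_get_private(master, slave);
}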
5942 
5943 
5944 int dev_get_nest_level(struct net_device *dev,
5945 		       bool (*type_check)(const struct net_device *dev))
5946 {
5947 	struct net_device *lower = NULL;
5948 	struct list_head *iter;
5949 	int max_nest = -1;
5950 	int nest;
5951 
5952 	ASSERT_RTNL();
5953 
5954 	netdev_for_each_lower_dev(dev, lower, iter) {
5955 		nest = dev_get_nest_level(lower, type_check);
5956 		if (max_nest < nest)
5957 			max_nest = nest;
5958 	}
5959 
5960 	if (type_check(dev))
5961 		max_nest++;
5962 
5963 	return max_nest;
5964 }
5965 EXPORT_SYMBOL(dev_get_nest_level);
5966 
5967 /**
5968  * netdev_lower_state_changed - Dispatch event about lower device state change
5969  * @lower_dev: device
5970  * @lower_state_info: state to dispatch
5971  *
5972  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
5973  * The caller must hold the RTNL lock.
5974  */
5975 void netdev_lower_state_changed(struct net_device *lower_dev,
5976 				void *lower_state_info)
5977 {
5978 	struct netdev_notifier_changelowerstate_info changelowerstate_info;
5979 
5980 	ASSERT_RTNL();
5981 	changelowerstate_info.lower_state_info = lower_state_info;
5982 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
5983 				      &changelowerstate_info.info);
5984 }
5985 EXPORT_SYMBOL(netdev_lower_state_changed);
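/*
 * Illustrative sketch (not original code): a team/bond-like master pushing an
 * opaque state blob down to one of its lower devices.  The state layout is
 * entirely driver-defined (it travels as a void pointer), so struct
 * example_lower_state and example_port_state_change() are made up here.
 */
struct example_lower_state {
	bool link_up;
	bool tx_enabled;
};

static void example_port_state_change(struct net_device *lower_dev, bool up)
{
	struct example_lower_state state = {
		.link_up    = up,
		.tx_enabled = up,
	};

	netdev_lower_state_changed(lower_dev, &state);	/* RTNL held by caller */
}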
5986 
5987 static void dev_change_rx_flags(struct net_device *dev, int flags)
5988 {
5989 	const struct net_device_ops *ops = dev->netdev_ops;
5990 
5991 	if (ops->ndo_change_rx_flags)
5992 		ops->ndo_change_rx_flags(dev, flags);
5993 }
5994 
5995 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5996 {
5997 	unsigned int old_flags = dev->flags;
5998 	kuid_t uid;
5999 	kgid_t gid;
6000 
6001 	ASSERT_RTNL();
6002 
6003 	dev->flags |= IFF_PROMISC;
6004 	dev->promiscuity += inc;
6005 	if (dev->promiscuity == 0) {
6006 		/*
6007 		 * Avoid overflow.
6008 		 * If inc causes overflow, untouch promisc and return error.
6009 		 */
6010 		if (inc < 0)
6011 			dev->flags &= ~IFF_PROMISC;
6012 		else {
6013 			dev->promiscuity -= inc;
6014 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6015 				dev->name);
6016 			return -EOVERFLOW;
6017 		}
6018 	}
6019 	if (dev->flags != old_flags) {
6020 		pr_info("device %s %s promiscuous mode\n",
6021 			dev->name,
6022 			dev->flags & IFF_PROMISC ? "entered" : "left");
6023 		if (audit_enabled) {
6024 			current_uid_gid(&uid, &gid);
6025 			audit_log(current->audit_context, GFP_ATOMIC,
6026 				AUDIT_ANOM_PROMISCUOUS,
6027 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6028 				dev->name, (dev->flags & IFF_PROMISC),
6029 				(old_flags & IFF_PROMISC),
6030 				from_kuid(&init_user_ns, audit_get_loginuid(current)),
6031 				from_kuid(&init_user_ns, uid),
6032 				from_kgid(&init_user_ns, gid),
6033 				audit_get_sessionid(current));
6034 		}
6035 
6036 		dev_change_rx_flags(dev, IFF_PROMISC);
6037 	}
6038 	if (notify)
6039 		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
6040 	return 0;
6041 }
6042 
6043 /**
6044  *	dev_set_promiscuity	- update promiscuity count on a device
6045  *	@dev: device
6046  *	@inc: modifier
6047  *
6048  *	Add or remove promiscuity from a device. While the count in the device
6049  *	remains above zero the interface remains promiscuous. Once it hits zero
6050  *	the device reverts back to normal filtering operation. A negative inc
6051  *	value is used to drop promiscuity on the device.
6052  *	Return 0 if successful or a negative errno code on error.
6053  */
6054 int dev_set_promiscuity(struct net_device *dev, int inc)
6055 {
6056 	unsigned int old_flags = dev->flags;
6057 	int err;
6058 
6059 	err = __dev_set_promiscuity(dev, inc, true);
6060 	if (err < 0)
6061 		return err;
6062 	if (dev->flags != old_flags)
6063 		dev_set_rx_mode(dev);
6064 	return err;
6065 }
6066 EXPORT_SYMBOL(dev_set_promiscuity);
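/*
 * Illustrative sketch (assumption, not part of this file): a capture-like
 * user bumping and dropping the promiscuity count around a session.  The
 * example_* names are invented; dev_set_promiscuity() itself requires RTNL.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* may fail with -EOVERFLOW */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}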
6067 
6068 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
6069 {
6070 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
6071 
6072 	ASSERT_RTNL();
6073 
6074 	dev->flags |= IFF_ALLMULTI;
6075 	dev->allmulti += inc;
6076 	if (dev->allmulti == 0) {
6077 		/*
6078 		 * Avoid overflow.
6079 		 * If inc causes overflow, untouch allmulti and return error.
6080 		 */
6081 		if (inc < 0)
6082 			dev->flags &= ~IFF_ALLMULTI;
6083 		else {
6084 			dev->allmulti -= inc;
6085 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6086 				dev->name);
6087 			return -EOVERFLOW;
6088 		}
6089 	}
6090 	if (dev->flags ^ old_flags) {
6091 		dev_change_rx_flags(dev, IFF_ALLMULTI);
6092 		dev_set_rx_mode(dev);
6093 		if (notify)
6094 			__dev_notify_flags(dev, old_flags,
6095 					   dev->gflags ^ old_gflags);
6096 	}
6097 	return 0;
6098 }
6099 
6100 /**
6101  *	dev_set_allmulti	- update allmulti count on a device
6102  *	@dev: device
6103  *	@inc: modifier
6104  *
6105  *	Add or remove reception of all multicast frames to a device. While the
6106  *	count in the device remains above zero the interface remains listening
6107  *	to all interfaces. Once it hits zero the device reverts back to normal
6108  *	filtering operation. A negative @inc value is used to drop the counter
6109  *	when releasing a resource needing all multicasts.
6110  *	Return 0 if successful or a negative errno code on error.
6111  */
6112 
6113 int dev_set_allmulti(struct net_device *dev, int inc)
6114 {
6115 	return __dev_set_allmulti(dev, inc, true);
6116 }
6117 EXPORT_SYMBOL(dev_set_allmulti);
6118 
6119 /*
6120  *	Upload unicast and multicast address lists to device and
6121  *	configure RX filtering. When the device doesn't support unicast
6122  *	filtering it is put in promiscuous mode while unicast addresses
6123  *	are present.
6124  */
6125 void __dev_set_rx_mode(struct net_device *dev)
6126 {
6127 	const struct net_device_ops *ops = dev->netdev_ops;
6128 
6129 	/* dev_open will call this function so the list will stay sane. */
6130 	if (!(dev->flags&IFF_UP))
6131 		return;
6132 
6133 	if (!netif_device_present(dev))
6134 		return;
6135 
6136 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
6137 		/* Unicast address changes may only happen under the rtnl,
6138 		 * therefore calling __dev_set_promiscuity here is safe.
6139 		 */
6140 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
6141 			__dev_set_promiscuity(dev, 1, false);
6142 			dev->uc_promisc = true;
6143 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
6144 			__dev_set_promiscuity(dev, -1, false);
6145 			dev->uc_promisc = false;
6146 		}
6147 	}
6148 
6149 	if (ops->ndo_set_rx_mode)
6150 		ops->ndo_set_rx_mode(dev);
6151 }
6152 
6153 void dev_set_rx_mode(struct net_device *dev)
6154 {
6155 	netif_addr_lock_bh(dev);
6156 	__dev_set_rx_mode(dev);
6157 	netif_addr_unlock_bh(dev);
6158 }
6159 
6160 /**
6161  *	dev_get_flags - get flags reported to userspace
6162  *	@dev: device
6163  *
6164  *	Get the combination of flag bits exported through APIs to userspace.
6165  */
6166 unsigned int dev_get_flags(const struct net_device *dev)
6167 {
6168 	unsigned int flags;
6169 
6170 	flags = (dev->flags & ~(IFF_PROMISC |
6171 				IFF_ALLMULTI |
6172 				IFF_RUNNING |
6173 				IFF_LOWER_UP |
6174 				IFF_DORMANT)) |
6175 		(dev->gflags & (IFF_PROMISC |
6176 				IFF_ALLMULTI));
6177 
6178 	if (netif_running(dev)) {
6179 		if (netif_oper_up(dev))
6180 			flags |= IFF_RUNNING;
6181 		if (netif_carrier_ok(dev))
6182 			flags |= IFF_LOWER_UP;
6183 		if (netif_dormant(dev))
6184 			flags |= IFF_DORMANT;
6185 	}
6186 
6187 	return flags;
6188 }
6189 EXPORT_SYMBOL(dev_get_flags);
6190 
6191 int __dev_change_flags(struct net_device *dev, unsigned int flags)
6192 {
6193 	unsigned int old_flags = dev->flags;
6194 	int ret;
6195 
6196 	ASSERT_RTNL();
6197 
6198 	/*
6199 	 *	Set the flags on our device.
6200 	 */
6201 
6202 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6203 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6204 			       IFF_AUTOMEDIA)) |
6205 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6206 				    IFF_ALLMULTI));
6207 
6208 	/*
6209 	 *	Load in the correct multicast list now the flags have changed.
6210 	 */
6211 
6212 	if ((old_flags ^ flags) & IFF_MULTICAST)
6213 		dev_change_rx_flags(dev, IFF_MULTICAST);
6214 
6215 	dev_set_rx_mode(dev);
6216 
6217 	/*
6218 	 *	Have we downed the interface? We handle IFF_UP ourselves
6219 	 *	according to user attempts to set it, rather than blindly
6220 	 *	setting it.
6221 	 */
6222 
6223 	ret = 0;
6224 	if ((old_flags ^ flags) & IFF_UP)
6225 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
6226 
6227 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
6228 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
6229 		unsigned int old_flags = dev->flags;
6230 
6231 		dev->gflags ^= IFF_PROMISC;
6232 
6233 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
6234 			if (dev->flags != old_flags)
6235 				dev_set_rx_mode(dev);
6236 	}
6237 
6238 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6239 	   is important. Some (broken) drivers set IFF_PROMISC when
6240 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
6241 	 */
6242 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
6243 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6244 
6245 		dev->gflags ^= IFF_ALLMULTI;
6246 		__dev_set_allmulti(dev, inc, false);
6247 	}
6248 
6249 	return ret;
6250 }
6251 
6252 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6253 			unsigned int gchanges)
6254 {
6255 	unsigned int changes = dev->flags ^ old_flags;
6256 
6257 	if (gchanges)
6258 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
6259 
6260 	if (changes & IFF_UP) {
6261 		if (dev->flags & IFF_UP)
6262 			call_netdevice_notifiers(NETDEV_UP, dev);
6263 		else
6264 			call_netdevice_notifiers(NETDEV_DOWN, dev);
6265 	}
6266 
6267 	if (dev->flags & IFF_UP &&
6268 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6269 		struct netdev_notifier_change_info change_info;
6270 
6271 		change_info.flags_changed = changes;
6272 		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6273 					      &change_info.info);
6274 	}
6275 }
6276 
6277 /**
6278  *	dev_change_flags - change device settings
6279  *	@dev: device
6280  *	@flags: device state flags
6281  *
6282  *	Change settings on a device based on the supplied state flags. The flags are
6283  *	in the userspace exported format.
6284  */
6285 int dev_change_flags(struct net_device *dev, unsigned int flags)
6286 {
6287 	int ret;
6288 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
6289 
6290 	ret = __dev_change_flags(dev, flags);
6291 	if (ret < 0)
6292 		return ret;
6293 
6294 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
6295 	__dev_notify_flags(dev, old_flags, changes);
6296 	return ret;
6297 }
6298 EXPORT_SYMBOL(dev_change_flags);
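/*
 * Illustrative sketch (not original code): administratively bringing an
 * interface up from kernel context, the same way the ioctl path does it.
 * example_bring_up() is a made-up name; dev_get_flags()/dev_change_flags()
 * are the real helpers and must be called under RTNL.
 */
static int example_bring_up(struct net_device *dev)
{
	unsigned int flags;
	int err = 0;

	rtnl_lock();
	flags = dev_get_flags(dev);
	if (!(flags & IFF_UP))
		err = dev_change_flags(dev, flags | IFF_UP);
	rtnl_unlock();
	return err;
}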
6299 
6300 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6301 {
6302 	const struct net_device_ops *ops = dev->netdev_ops;
6303 
6304 	if (ops->ndo_change_mtu)
6305 		return ops->ndo_change_mtu(dev, new_mtu);
6306 
6307 	dev->mtu = new_mtu;
6308 	return 0;
6309 }
6310 
6311 /**
6312  *	dev_set_mtu - Change maximum transfer unit
6313  *	@dev: device
6314  *	@new_mtu: new transfer unit
6315  *
6316  *	Change the maximum transfer size of the network device.
6317  */
6318 int dev_set_mtu(struct net_device *dev, int new_mtu)
6319 {
6320 	int err, orig_mtu;
6321 
6322 	if (new_mtu == dev->mtu)
6323 		return 0;
6324 
6325 	/*	MTU must be positive.	 */
6326 	if (new_mtu < 0)
6327 		return -EINVAL;
6328 
6329 	if (!netif_device_present(dev))
6330 		return -ENODEV;
6331 
6332 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6333 	err = notifier_to_errno(err);
6334 	if (err)
6335 		return err;
6336 
6337 	orig_mtu = dev->mtu;
6338 	err = __dev_set_mtu(dev, new_mtu);
6339 
6340 	if (!err) {
6341 		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6342 		err = notifier_to_errno(err);
6343 		if (err) {
6344 			/* setting mtu back and notifying everyone again,
6345 			 * so that they have a chance to revert changes.
6346 			 */
6347 			__dev_set_mtu(dev, orig_mtu);
6348 			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6349 		}
6350 	}
6351 	return err;
6352 }
6353 EXPORT_SYMBOL(dev_set_mtu);
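/*
 * Illustrative sketch (assumption): switching a device to a jumbo MTU.  The
 * 9000-byte value and example_set_jumbo_mtu() are made up; the helper itself
 * sends NETDEV_PRECHANGEMTU/NETDEV_CHANGEMTU and reverts if a notifier vetoes.
 */
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}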
6354 
6355 /**
6356  *	dev_set_group - Change group this device belongs to
6357  *	@dev: device
6358  *	@new_group: group this device should belong to
6359  */
6360 void dev_set_group(struct net_device *dev, int new_group)
6361 {
6362 	dev->group = new_group;
6363 }
6364 EXPORT_SYMBOL(dev_set_group);
6365 
6366 /**
6367  *	dev_set_mac_address - Change Media Access Control Address
6368  *	@dev: device
6369  *	@sa: new address
6370  *
6371  *	Change the hardware (MAC) address of the device
6372  */
6373 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6374 {
6375 	const struct net_device_ops *ops = dev->netdev_ops;
6376 	int err;
6377 
6378 	if (!ops->ndo_set_mac_address)
6379 		return -EOPNOTSUPP;
6380 	if (sa->sa_family != dev->type)
6381 		return -EINVAL;
6382 	if (!netif_device_present(dev))
6383 		return -ENODEV;
6384 	err = ops->ndo_set_mac_address(dev, sa);
6385 	if (err)
6386 		return err;
6387 	dev->addr_assign_type = NET_ADDR_SET;
6388 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
6389 	add_device_randomness(dev->dev_addr, dev->addr_len);
6390 	return 0;
6391 }
6392 EXPORT_SYMBOL(dev_set_mac_address);
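/*
 * Illustrative sketch (not from the original source): programming a new
 * hardware address from kernel code.  sa_family must match dev->type or the
 * helper returns -EINVAL; example_set_mac() and new_mac are invented names.
 */
static int example_set_mac(struct net_device *dev, const u8 *new_mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}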
6393 
6394 /**
6395  *	dev_change_carrier - Change device carrier
6396  *	@dev: device
6397  *	@new_carrier: new value
6398  *
6399  *	Change device carrier
6400  */
6401 int dev_change_carrier(struct net_device *dev, bool new_carrier)
6402 {
6403 	const struct net_device_ops *ops = dev->netdev_ops;
6404 
6405 	if (!ops->ndo_change_carrier)
6406 		return -EOPNOTSUPP;
6407 	if (!netif_device_present(dev))
6408 		return -ENODEV;
6409 	return ops->ndo_change_carrier(dev, new_carrier);
6410 }
6411 EXPORT_SYMBOL(dev_change_carrier);
6412 
6413 /**
6414  *	dev_get_phys_port_id - Get device physical port ID
6415  *	@dev: device
6416  *	@ppid: port ID
6417  *
6418  *	Get device physical port ID
6419  */
6420 int dev_get_phys_port_id(struct net_device *dev,
6421 			 struct netdev_phys_item_id *ppid)
6422 {
6423 	const struct net_device_ops *ops = dev->netdev_ops;
6424 
6425 	if (!ops->ndo_get_phys_port_id)
6426 		return -EOPNOTSUPP;
6427 	return ops->ndo_get_phys_port_id(dev, ppid);
6428 }
6429 EXPORT_SYMBOL(dev_get_phys_port_id);
6430 
6431 /**
6432  *	dev_get_phys_port_name - Get device physical port name
6433  *	@dev: device
6434  *	@name: port name
6435  *
6436  *	Get device physical port name
6437  */
6438 int dev_get_phys_port_name(struct net_device *dev,
6439 			   char *name, size_t len)
6440 {
6441 	const struct net_device_ops *ops = dev->netdev_ops;
6442 
6443 	if (!ops->ndo_get_phys_port_name)
6444 		return -EOPNOTSUPP;
6445 	return ops->ndo_get_phys_port_name(dev, name, len);
6446 }
6447 EXPORT_SYMBOL(dev_get_phys_port_name);
6448 
6449 /**
6450  *	dev_change_proto_down - update protocol port state information
6451  *	@dev: device
6452  *	@proto_down: new value
6453  *
6454  *	This info can be used by switch drivers to set the phys state of the
6455  *	port.
6456  */
6457 int dev_change_proto_down(struct net_device *dev, bool proto_down)
6458 {
6459 	const struct net_device_ops *ops = dev->netdev_ops;
6460 
6461 	if (!ops->ndo_change_proto_down)
6462 		return -EOPNOTSUPP;
6463 	if (!netif_device_present(dev))
6464 		return -ENODEV;
6465 	return ops->ndo_change_proto_down(dev, proto_down);
6466 }
6467 EXPORT_SYMBOL(dev_change_proto_down);
6468 
6469 /**
6470  *	dev_new_index	-	allocate an ifindex
6471  *	@net: the applicable net namespace
6472  *
6473  *	Returns a suitable unique value for a new device interface
6474  *	number.  The caller must hold the rtnl semaphore or the
6475  *	dev_base_lock to be sure it remains unique.
6476  */
6477 static int dev_new_index(struct net *net)
6478 {
6479 	int ifindex = net->ifindex;
6480 	for (;;) {
6481 		if (++ifindex <= 0)
6482 			ifindex = 1;
6483 		if (!__dev_get_by_index(net, ifindex))
6484 			return net->ifindex = ifindex;
6485 	}
6486 }
6487 
6488 /* Delayed registration/unregisteration */
6489 static LIST_HEAD(net_todo_list);
6490 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
6491 
6492 static void net_set_todo(struct net_device *dev)
6493 {
6494 	list_add_tail(&dev->todo_list, &net_todo_list);
6495 	dev_net(dev)->dev_unreg_count++;
6496 }
6497 
6498 static void rollback_registered_many(struct list_head *head)
6499 {
6500 	struct net_device *dev, *tmp;
6501 	LIST_HEAD(close_head);
6502 
6503 	BUG_ON(dev_boot_phase);
6504 	ASSERT_RTNL();
6505 
6506 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
6507 		/* Some devices call without registering
6508 		 * for initialization unwind. Remove those
6509 		 * devices and proceed with the remaining.
6510 		 */
6511 		if (dev->reg_state == NETREG_UNINITIALIZED) {
6512 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6513 				 dev->name, dev);
6514 
6515 			WARN_ON(1);
6516 			list_del(&dev->unreg_list);
6517 			continue;
6518 		}
6519 		dev->dismantle = true;
6520 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
6521 	}
6522 
6523 	/* If device is running, close it first. */
6524 	list_for_each_entry(dev, head, unreg_list)
6525 		list_add_tail(&dev->close_list, &close_head);
6526 	dev_close_many(&close_head, true);
6527 
6528 	list_for_each_entry(dev, head, unreg_list) {
6529 		/* And unlink it from device chain. */
6530 		unlist_netdevice(dev);
6531 
6532 		dev->reg_state = NETREG_UNREGISTERING;
6533 		on_each_cpu(flush_backlog, dev, 1);
6534 	}
6535 
6536 	synchronize_net();
6537 
6538 	list_for_each_entry(dev, head, unreg_list) {
6539 		struct sk_buff *skb = NULL;
6540 
6541 		/* Shutdown queueing discipline. */
6542 		dev_shutdown(dev);
6543 
6544 
6545 		/* Notify protocols, that we are about to destroy
6546 		   this device. They should clean all the things.
6547 		*/
6548 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549 
6550 		if (!dev->rtnl_link_ops ||
6551 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6552 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6553 						     GFP_KERNEL);
6554 
6555 		/*
6556 		 *	Flush the unicast and multicast chains
6557 		 */
6558 		dev_uc_flush(dev);
6559 		dev_mc_flush(dev);
6560 
6561 		if (dev->netdev_ops->ndo_uninit)
6562 			dev->netdev_ops->ndo_uninit(dev);
6563 
6564 		if (skb)
6565 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6566 
6567 		/* Notifier chain MUST detach us all upper devices. */
6568 		WARN_ON(netdev_has_any_upper_dev(dev));
6569 
6570 		/* Remove entries from kobject tree */
6571 		netdev_unregister_kobject(dev);
6572 #ifdef CONFIG_XPS
6573 		/* Remove XPS queueing entries */
6574 		netif_reset_xps_queues_gt(dev, 0);
6575 #endif
6576 	}
6577 
6578 	synchronize_net();
6579 
6580 	list_for_each_entry(dev, head, unreg_list)
6581 		dev_put(dev);
6582 }
6583 
6584 static void rollback_registered(struct net_device *dev)
6585 {
6586 	LIST_HEAD(single);
6587 
6588 	list_add(&dev->unreg_list, &single);
6589 	rollback_registered_many(&single);
6590 	list_del(&single);
6591 }
6592 
6593 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6594 	struct net_device *upper, netdev_features_t features)
6595 {
6596 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6597 	netdev_features_t feature;
6598 	int feature_bit;
6599 
6600 	for_each_netdev_feature(&upper_disables, feature_bit) {
6601 		feature = __NETIF_F_BIT(feature_bit);
6602 		if (!(upper->wanted_features & feature)
6603 		    && (features & feature)) {
6604 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6605 				   &feature, upper->name);
6606 			features &= ~feature;
6607 		}
6608 	}
6609 
6610 	return features;
6611 }
6612 
6613 static void netdev_sync_lower_features(struct net_device *upper,
6614 	struct net_device *lower, netdev_features_t features)
6615 {
6616 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6617 	netdev_features_t feature;
6618 	int feature_bit;
6619 
6620 	for_each_netdev_feature(&upper_disables, feature_bit) {
6621 		feature = __NETIF_F_BIT(feature_bit);
6622 		if (!(features & feature) && (lower->features & feature)) {
6623 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6624 				   &feature, lower->name);
6625 			lower->wanted_features &= ~feature;
6626 			netdev_update_features(lower);
6627 
6628 			if (unlikely(lower->features & feature))
6629 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6630 					    &feature, lower->name);
6631 		}
6632 	}
6633 }
6634 
6635 static netdev_features_t netdev_fix_features(struct net_device *dev,
6636 	netdev_features_t features)
6637 {
6638 	/* Fix illegal checksum combinations */
6639 	if ((features & NETIF_F_HW_CSUM) &&
6640 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6641 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6642 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6643 	}
6644 
6645 	/* TSO requires that SG is present as well. */
6646 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6647 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6648 		features &= ~NETIF_F_ALL_TSO;
6649 	}
6650 
6651 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6652 					!(features & NETIF_F_IP_CSUM)) {
6653 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6654 		features &= ~NETIF_F_TSO;
6655 		features &= ~NETIF_F_TSO_ECN;
6656 	}
6657 
6658 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6659 					 !(features & NETIF_F_IPV6_CSUM)) {
6660 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6661 		features &= ~NETIF_F_TSO6;
6662 	}
6663 
6664 	/* TSO ECN requires that TSO is present as well. */
6665 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6666 		features &= ~NETIF_F_TSO_ECN;
6667 
6668 	/* Software GSO depends on SG. */
6669 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6670 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6671 		features &= ~NETIF_F_GSO;
6672 	}
6673 
6674 	/* UFO needs SG and checksumming */
6675 	if (features & NETIF_F_UFO) {
6676 		/* maybe split UFO into V4 and V6? */
6677 		if (!(features & NETIF_F_HW_CSUM) &&
6678 		    ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
6679 		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
6680 			netdev_dbg(dev,
6681 				"Dropping NETIF_F_UFO since no checksum offload features.\n");
6682 			features &= ~NETIF_F_UFO;
6683 		}
6684 
6685 		if (!(features & NETIF_F_SG)) {
6686 			netdev_dbg(dev,
6687 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6688 			features &= ~NETIF_F_UFO;
6689 		}
6690 	}
6691 
6692 #ifdef CONFIG_NET_RX_BUSY_POLL
6693 	if (dev->netdev_ops->ndo_busy_poll)
6694 		features |= NETIF_F_BUSY_POLL;
6695 	else
6696 #endif
6697 		features &= ~NETIF_F_BUSY_POLL;
6698 
6699 	return features;
6700 }
6701 
6702 int __netdev_update_features(struct net_device *dev)
6703 {
6704 	struct net_device *upper, *lower;
6705 	netdev_features_t features;
6706 	struct list_head *iter;
6707 	int err = -1;
6708 
6709 	ASSERT_RTNL();
6710 
6711 	features = netdev_get_wanted_features(dev);
6712 
6713 	if (dev->netdev_ops->ndo_fix_features)
6714 		features = dev->netdev_ops->ndo_fix_features(dev, features);
6715 
6716 	/* driver might be less strict about feature dependencies */
6717 	features = netdev_fix_features(dev, features);
6718 
6719 	/* some features can't be enabled if they're off on an upper device */
6720 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
6721 		features = netdev_sync_upper_features(dev, upper, features);
6722 
6723 	if (dev->features == features)
6724 		goto sync_lower;
6725 
6726 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6727 		&dev->features, &features);
6728 
6729 	if (dev->netdev_ops->ndo_set_features)
6730 		err = dev->netdev_ops->ndo_set_features(dev, features);
6731 	else
6732 		err = 0;
6733 
6734 	if (unlikely(err < 0)) {
6735 		netdev_err(dev,
6736 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
6737 			err, &features, &dev->features);
6738 		/* return non-0 since some features might have changed and
6739 		 * it's better to fire a spurious notification than miss it
6740 		 */
6741 		return -1;
6742 	}
6743 
6744 sync_lower:
6745 	/* some features must be disabled on lower devices when disabled
6746 	 * on an upper device (think: bonding master or bridge)
6747 	 */
6748 	netdev_for_each_lower_dev(dev, lower, iter)
6749 		netdev_sync_lower_features(dev, lower, features);
6750 
6751 	if (!err)
6752 		dev->features = features;
6753 
6754 	return err < 0 ? 0 : 1;
6755 }
6756 
6757 /**
6758  *	netdev_update_features - recalculate device features
6759  *	@dev: the device to check
6760  *
6761  *	Recalculate dev->features set and send notifications if it
6762  *	has changed. Should be called after driver or hardware dependent
6763  *	conditions might have changed that influence the features.
6764  */
6765 void netdev_update_features(struct net_device *dev)
6766 {
6767 	if (__netdev_update_features(dev))
6768 		netdev_features_change(dev);
6769 }
6770 EXPORT_SYMBOL(netdev_update_features);
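/*
 * Illustrative sketch (assumption): a driver that discovers at runtime that
 * it can no longer offload checksums withdraws the capability and asks the
 * core to recompute dev->features.  example_disable_hw_csum() is made up.
 */
static void example_disable_hw_csum(struct net_device *dev)
{
	ASSERT_RTNL();

	dev->hw_features &= ~NETIF_F_HW_CSUM;	/* capability no longer offered */
	netdev_update_features(dev);		/* recompute and notify if changed */
}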
6771 
6772 /**
6773  *	netdev_change_features - recalculate device features
6774  *	@dev: the device to check
6775  *
6776  *	Recalculate dev->features set and send notifications even
6777  *	if they have not changed. Should be called instead of
6778  *	netdev_update_features() if also dev->vlan_features might
6779  *	have changed to allow the changes to be propagated to stacked
6780  *	VLAN devices.
6781  */
6782 void netdev_change_features(struct net_device *dev)
6783 {
6784 	__netdev_update_features(dev);
6785 	netdev_features_change(dev);
6786 }
6787 EXPORT_SYMBOL(netdev_change_features);
6788 
6789 /**
6790  *	netif_stacked_transfer_operstate -	transfer operstate
6791  *	@rootdev: the root or lower level device to transfer state from
6792  *	@dev: the device to transfer operstate to
6793  *
6794  *	Transfer operational state from root to device. This is normally
6795  *	called when a stacking relationship exists between the root
6796  *	device and the device (a leaf device).
6797  */
6798 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6799 					struct net_device *dev)
6800 {
6801 	if (rootdev->operstate == IF_OPER_DORMANT)
6802 		netif_dormant_on(dev);
6803 	else
6804 		netif_dormant_off(dev);
6805 
6806 	if (netif_carrier_ok(rootdev)) {
6807 		if (!netif_carrier_ok(dev))
6808 			netif_carrier_on(dev);
6809 	} else {
6810 		if (netif_carrier_ok(dev))
6811 			netif_carrier_off(dev);
6812 	}
6813 }
6814 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6815 
6816 #ifdef CONFIG_SYSFS
6817 static int netif_alloc_rx_queues(struct net_device *dev)
6818 {
6819 	unsigned int i, count = dev->num_rx_queues;
6820 	struct netdev_rx_queue *rx;
6821 	size_t sz = count * sizeof(*rx);
6822 
6823 	BUG_ON(count < 1);
6824 
6825 	rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6826 	if (!rx) {
6827 		rx = vzalloc(sz);
6828 		if (!rx)
6829 			return -ENOMEM;
6830 	}
6831 	dev->_rx = rx;
6832 
6833 	for (i = 0; i < count; i++)
6834 		rx[i].dev = dev;
6835 	return 0;
6836 }
6837 #endif
6838 
6839 static void netdev_init_one_queue(struct net_device *dev,
6840 				  struct netdev_queue *queue, void *_unused)
6841 {
6842 	/* Initialize queue lock */
6843 	spin_lock_init(&queue->_xmit_lock);
6844 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6845 	queue->xmit_lock_owner = -1;
6846 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6847 	queue->dev = dev;
6848 #ifdef CONFIG_BQL
6849 	dql_init(&queue->dql, HZ);
6850 #endif
6851 }
6852 
6853 static void netif_free_tx_queues(struct net_device *dev)
6854 {
6855 	kvfree(dev->_tx);
6856 }
6857 
6858 static int netif_alloc_netdev_queues(struct net_device *dev)
6859 {
6860 	unsigned int count = dev->num_tx_queues;
6861 	struct netdev_queue *tx;
6862 	size_t sz = count * sizeof(*tx);
6863 
6864 	if (count < 1 || count > 0xffff)
6865 		return -EINVAL;
6866 
6867 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6868 	if (!tx) {
6869 		tx = vzalloc(sz);
6870 		if (!tx)
6871 			return -ENOMEM;
6872 	}
6873 	dev->_tx = tx;
6874 
6875 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6876 	spin_lock_init(&dev->tx_global_lock);
6877 
6878 	return 0;
6879 }
6880 
6881 void netif_tx_stop_all_queues(struct net_device *dev)
6882 {
6883 	unsigned int i;
6884 
6885 	for (i = 0; i < dev->num_tx_queues; i++) {
6886 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6887 		netif_tx_stop_queue(txq);
6888 	}
6889 }
6890 EXPORT_SYMBOL(netif_tx_stop_all_queues);
6891 
6892 /**
6893  *	register_netdevice	- register a network device
6894  *	@dev: device to register
6895  *
6896  *	Take a completed network device structure and add it to the kernel
6897  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6898  *	chain. 0 is returned on success. A negative errno code is returned
6899  *	on a failure to set up the device, or if the name is a duplicate.
6900  *
6901  *	Callers must hold the rtnl semaphore. You may want
6902  *	register_netdev() instead of this.
6903  *
6904  *	BUGS:
6905  *	The locking appears insufficient to guarantee two parallel registers
6906  *	will not get the same name.
6907  */
6908 
6909 int register_netdevice(struct net_device *dev)
6910 {
6911 	int ret;
6912 	struct net *net = dev_net(dev);
6913 
6914 	BUG_ON(dev_boot_phase);
6915 	ASSERT_RTNL();
6916 
6917 	might_sleep();
6918 
6919 	/* When net_device structures are persistent, this will be fatal. */
6920 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6921 	BUG_ON(!net);
6922 
6923 	spin_lock_init(&dev->addr_list_lock);
6924 	netdev_set_addr_lockdep_class(dev);
6925 
6926 	ret = dev_get_valid_name(net, dev, dev->name);
6927 	if (ret < 0)
6928 		goto out;
6929 
6930 	/* Init, if this function is available */
6931 	if (dev->netdev_ops->ndo_init) {
6932 		ret = dev->netdev_ops->ndo_init(dev);
6933 		if (ret) {
6934 			if (ret > 0)
6935 				ret = -EIO;
6936 			goto out;
6937 		}
6938 	}
6939 
6940 	if (((dev->hw_features | dev->features) &
6941 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
6942 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6943 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6944 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6945 		ret = -EINVAL;
6946 		goto err_uninit;
6947 	}
6948 
6949 	ret = -EBUSY;
6950 	if (!dev->ifindex)
6951 		dev->ifindex = dev_new_index(net);
6952 	else if (__dev_get_by_index(net, dev->ifindex))
6953 		goto err_uninit;
6954 
6955 	/* Transfer changeable features to wanted_features and enable
6956 	 * software offloads (GSO and GRO).
6957 	 */
6958 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
6959 	dev->features |= NETIF_F_SOFT_FEATURES;
6960 	dev->wanted_features = dev->features & dev->hw_features;
6961 
6962 	if (!(dev->flags & IFF_LOOPBACK)) {
6963 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
6964 	}
6965 
6966 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6967 	 */
6968 	dev->vlan_features |= NETIF_F_HIGHDMA;
6969 
6970 	/* Make NETIF_F_SG inheritable to tunnel devices.
6971 	 */
6972 	dev->hw_enc_features |= NETIF_F_SG;
6973 
6974 	/* Make NETIF_F_SG inheritable to MPLS.
6975 	 */
6976 	dev->mpls_features |= NETIF_F_SG;
6977 
6978 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6979 	ret = notifier_to_errno(ret);
6980 	if (ret)
6981 		goto err_uninit;
6982 
6983 	ret = netdev_register_kobject(dev);
6984 	if (ret)
6985 		goto err_uninit;
6986 	dev->reg_state = NETREG_REGISTERED;
6987 
6988 	__netdev_update_features(dev);
6989 
6990 	/*
6991 	 *	Default initial state at registration is that the
6992 	 *	device is present.
6993 	 */
6994 
6995 	set_bit(__LINK_STATE_PRESENT, &dev->state);
6996 
6997 	linkwatch_init_dev(dev);
6998 
6999 	dev_init_scheduler(dev);
7000 	dev_hold(dev);
7001 	list_netdevice(dev);
7002 	add_device_randomness(dev->dev_addr, dev->addr_len);
7003 
7004 	/* If the device has a permanent device address, the driver should
7005 	 * set dev_addr and addr_assign_type should be set to
7006 	 * NET_ADDR_PERM (the default value).
7007 	 */
7008 	if (dev->addr_assign_type == NET_ADDR_PERM)
7009 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7010 
7011 	/* Notify protocols, that a new device appeared. */
7012 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
7013 	ret = notifier_to_errno(ret);
7014 	if (ret) {
7015 		rollback_registered(dev);
7016 		dev->reg_state = NETREG_UNREGISTERED;
7017 	}
7018 	/*
7019 	 *	Prevent userspace races by waiting until the network
7020 	 *	device is fully set up before sending notifications.
7021 	 */
7022 	if (!dev->rtnl_link_ops ||
7023 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7024 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7025 
7026 out:
7027 	return ret;
7028 
7029 err_uninit:
7030 	if (dev->netdev_ops->ndo_uninit)
7031 		dev->netdev_ops->ndo_uninit(dev);
7032 	goto out;
7033 }
7034 EXPORT_SYMBOL(register_netdevice);
7035 
7036 /**
7037  *	init_dummy_netdev	- init a dummy network device for NAPI
7038  *	@dev: device to init
7039  *
7040  *	This takes a network device structure and initializes the minimum
7041  *	set of fields so it can be used to schedule NAPI polls without
7042  *	registering a full-blown interface. This is to be used by drivers
7043  *	that need to tie several hardware interfaces to a single NAPI
7044  *	poll scheduler due to HW limitations.
7045  */
7046 int init_dummy_netdev(struct net_device *dev)
7047 {
7048 	/* Clear everything. Note we don't initialize spinlocks
7049 	 * as they aren't supposed to be taken by any of the
7050 	 * NAPI code and this dummy netdev is supposed to be
7051 	 * only ever used for NAPI polls
7052 	 */
7053 	memset(dev, 0, sizeof(struct net_device));
7054 
7055 	/* make sure we BUG if trying to hit standard
7056 	 * register/unregister code path
7057 	 */
7058 	dev->reg_state = NETREG_DUMMY;
7059 
7060 	/* NAPI wants this */
7061 	INIT_LIST_HEAD(&dev->napi_list);
7062 
7063 	/* a dummy interface is started by default */
7064 	set_bit(__LINK_STATE_PRESENT, &dev->state);
7065 	set_bit(__LINK_STATE_START, &dev->state);
7066 
7067 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
7068 	 * because users of this 'device' don't need to change
7069 	 * its refcount.
7070 	 */
7071 
7072 	return 0;
7073 }
7074 EXPORT_SYMBOL_GPL(init_dummy_netdev);
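/*
 * Illustrative sketch (not original code): tying several hardware channels to
 * one NAPI scheduler with a dummy netdev, as the kernel-doc above describes.
 * struct example_hw, example_poll() and the weight of 64 are assumptions made
 * up for this example.
 */
struct example_hw {
	struct net_device napi_dev;	/* never registered, NAPI bookkeeping only */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* a real driver would process up to @budget packets here */
	napi_complete(napi);
	return 0;
}

static void example_hw_init(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
}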
7075 
7076 
7077 /**
7078  *	register_netdev	- register a network device
7079  *	@dev: device to register
7080  *
7081  *	Take a completed network device structure and add it to the kernel
7082  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7083  *	chain. 0 is returned on success. A negative errno code is returned
7084  *	on a failure to set up the device, or if the name is a duplicate.
7085  *
7086  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
7087  *	and expands the device name if you passed a format string to
7088  *	alloc_netdev.
7089  */
7090 int register_netdev(struct net_device *dev)
7091 {
7092 	int err;
7093 
7094 	rtnl_lock();
7095 	err = register_netdevice(dev);
7096 	rtnl_unlock();
7097 	return err;
7098 }
7099 EXPORT_SYMBOL(register_netdev);
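/*
 * Illustrative sketch (assumption): the usual probe-time pattern built on the
 * helpers above -- allocate an Ethernet device, register it, and free it
 * again if registration fails.  example_probe() and example_netdev_ops are
 * invented names for this sketch.
 */
static const struct net_device_ops example_netdev_ops = {
	/* a real driver provides ndo_open/ndo_stop/ndo_start_xmit here */
};

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);	/* takes and releases RTNL itself */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}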
7100 
7101 int netdev_refcnt_read(const struct net_device *dev)
7102 {
7103 	int i, refcnt = 0;
7104 
7105 	for_each_possible_cpu(i)
7106 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7107 	return refcnt;
7108 }
7109 EXPORT_SYMBOL(netdev_refcnt_read);
7110 
7111 /**
7112  * netdev_wait_allrefs - wait until all references are gone.
7113  * @dev: target net_device
7114  *
7115  * This is called when unregistering network devices.
7116  *
7117  * Any protocol or device that holds a reference should register
7118  * for netdevice notification, and cleanup and put back the
7119  * reference if they receive an UNREGISTER event.
7120  * We can get stuck here if buggy protocols don't correctly
7121  * call dev_put.
7122  */
7123 static void netdev_wait_allrefs(struct net_device *dev)
7124 {
7125 	unsigned long rebroadcast_time, warning_time;
7126 	int refcnt;
7127 
7128 	linkwatch_forget_dev(dev);
7129 
7130 	rebroadcast_time = warning_time = jiffies;
7131 	refcnt = netdev_refcnt_read(dev);
7132 
7133 	while (refcnt != 0) {
7134 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
7135 			rtnl_lock();
7136 
7137 			/* Rebroadcast unregister notification */
7138 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7139 
7140 			__rtnl_unlock();
7141 			rcu_barrier();
7142 			rtnl_lock();
7143 
7144 			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7145 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7146 				     &dev->state)) {
7147 				/* We must not have linkwatch events
7148 				 * pending on unregister. If this
7149 				 * happens, we simply run the queue
7150 				 * unscheduled, resulting in a noop
7151 				 * for this device.
7152 				 */
7153 				linkwatch_run_queue();
7154 			}
7155 
7156 			__rtnl_unlock();
7157 
7158 			rebroadcast_time = jiffies;
7159 		}
7160 
7161 		msleep(250);
7162 
7163 		refcnt = netdev_refcnt_read(dev);
7164 
7165 		if (time_after(jiffies, warning_time + 10 * HZ)) {
7166 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7167 				 dev->name, refcnt);
7168 			warning_time = jiffies;
7169 		}
7170 	}
7171 }
7172 
7173 /* The sequence is:
7174  *
7175  *	rtnl_lock();
7176  *	...
7177  *	register_netdevice(x1);
7178  *	register_netdevice(x2);
7179  *	...
7180  *	unregister_netdevice(y1);
7181  *	unregister_netdevice(y2);
7182  *      ...
7183  *	rtnl_unlock();
7184  *	free_netdev(y1);
7185  *	free_netdev(y2);
7186  *
7187  * We are invoked by rtnl_unlock().
7188  * This allows us to deal with problems:
7189  * 1) We can delete sysfs objects which invoke hotplug
7190  *    without deadlocking with linkwatch via keventd.
7191  * 2) Since we run with the RTNL semaphore not held, we can sleep
7192  *    safely in order to wait for the netdev refcnt to drop to zero.
7193  *
7194  * We must not return until all unregister events added during
7195  * the interval the lock was held have been completed.
7196  */
7197 void netdev_run_todo(void)
7198 {
7199 	struct list_head list;
7200 
7201 	/* Snapshot list, allow later requests */
7202 	list_replace_init(&net_todo_list, &list);
7203 
7204 	__rtnl_unlock();
7205 
7206 
7207 	/* Wait for rcu callbacks to finish before next phase */
7208 	if (!list_empty(&list))
7209 		rcu_barrier();
7210 
7211 	while (!list_empty(&list)) {
7212 		struct net_device *dev
7213 			= list_first_entry(&list, struct net_device, todo_list);
7214 		list_del(&dev->todo_list);
7215 
7216 		rtnl_lock();
7217 		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7218 		__rtnl_unlock();
7219 
7220 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7221 			pr_err("network todo '%s' but state %d\n",
7222 			       dev->name, dev->reg_state);
7223 			dump_stack();
7224 			continue;
7225 		}
7226 
7227 		dev->reg_state = NETREG_UNREGISTERED;
7228 
7229 		netdev_wait_allrefs(dev);
7230 
7231 		/* paranoia */
7232 		BUG_ON(netdev_refcnt_read(dev));
7233 		BUG_ON(!list_empty(&dev->ptype_all));
7234 		BUG_ON(!list_empty(&dev->ptype_specific));
7235 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
7236 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
7237 		WARN_ON(dev->dn_ptr);
7238 
7239 		if (dev->destructor)
7240 			dev->destructor(dev);
7241 
7242 		/* Report a network device has been unregistered */
7243 		rtnl_lock();
7244 		dev_net(dev)->dev_unreg_count--;
7245 		__rtnl_unlock();
7246 		wake_up(&netdev_unregistering_wq);
7247 
7248 		/* Free network device */
7249 		kobject_put(&dev->dev.kobj);
7250 	}
7251 }
7252 
7253 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
7254  * fields in the same order, with only the type differing.
7255  */
7256 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7257 			     const struct net_device_stats *netdev_stats)
7258 {
7259 #if BITS_PER_LONG == 64
7260 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
7261 	memcpy(stats64, netdev_stats, sizeof(*stats64));
7262 #else
7263 	size_t i, n = sizeof(*stats64) / sizeof(u64);
7264 	const unsigned long *src = (const unsigned long *)netdev_stats;
7265 	u64 *dst = (u64 *)stats64;
7266 
7267 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
7268 		     sizeof(*stats64) / sizeof(u64));
7269 	for (i = 0; i < n; i++)
7270 		dst[i] = src[i];
7271 #endif
7272 }
7273 EXPORT_SYMBOL(netdev_stats_to_stats64);
7274 
7275 /**
7276  *	dev_get_stats	- get network device statistics
7277  *	@dev: device to get statistics from
7278  *	@storage: place to store stats
7279  *
7280  *	Get network statistics from device. Return @storage.
7281  *	The device driver may provide its own method by setting
7282  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7283  *	otherwise the internal statistics structure is used.
7284  */
7285 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7286 					struct rtnl_link_stats64 *storage)
7287 {
7288 	const struct net_device_ops *ops = dev->netdev_ops;
7289 
7290 	if (ops->ndo_get_stats64) {
7291 		memset(storage, 0, sizeof(*storage));
7292 		ops->ndo_get_stats64(dev, storage);
7293 	} else if (ops->ndo_get_stats) {
7294 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
7295 	} else {
7296 		netdev_stats_to_stats64(storage, &dev->stats);
7297 	}
7298 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
7299 	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
7300 	return storage;
7301 }
7302 EXPORT_SYMBOL(dev_get_stats);
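/*
 * Illustrative sketch (not from the original source): a caller snapshotting a
 * device's counters into a stack buffer; the helper uses whichever of
 * ndo_get_stats64/ndo_get_stats/dev->stats the driver provides.
 * example_rx_packets() is a made-up name.
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets;
}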
7303 
7304 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
7305 {
7306 	struct netdev_queue *queue = dev_ingress_queue(dev);
7307 
7308 #ifdef CONFIG_NET_CLS_ACT
7309 	if (queue)
7310 		return queue;
7311 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7312 	if (!queue)
7313 		return NULL;
7314 	netdev_init_one_queue(dev, queue, NULL);
7315 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
7316 	queue->qdisc_sleeping = &noop_qdisc;
7317 	rcu_assign_pointer(dev->ingress_queue, queue);
7318 #endif
7319 	return queue;
7320 }
7321 
7322 static const struct ethtool_ops default_ethtool_ops;
7323 
7324 void netdev_set_default_ethtool_ops(struct net_device *dev,
7325 				    const struct ethtool_ops *ops)
7326 {
7327 	if (dev->ethtool_ops == &default_ethtool_ops)
7328 		dev->ethtool_ops = ops;
7329 }
7330 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7331 
7332 void netdev_freemem(struct net_device *dev)
7333 {
7334 	char *addr = (char *)dev - dev->padded;
7335 
7336 	kvfree(addr);
7337 }
7338 
7339 /**
7340  *	alloc_netdev_mqs - allocate network device
7341  *	@sizeof_priv:		size of private data to allocate space for
7342  *	@name:			device name format string
7343  *	@name_assign_type: 	origin of device name
7344  *	@setup:			callback to initialize device
7345  *	@txqs:			the number of TX subqueues to allocate
7346  *	@rxqs:			the number of RX subqueues to allocate
7347  *
7348  *	Allocates a struct net_device with private data area for driver use
7349  *	and performs basic initialization.  Also allocates subqueue structs
7350  *	for each queue on the device.
7351  */
7352 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7353 		unsigned char name_assign_type,
7354 		void (*setup)(struct net_device *),
7355 		unsigned int txqs, unsigned int rxqs)
7356 {
7357 	struct net_device *dev;
7358 	size_t alloc_size;
7359 	struct net_device *p;
7360 
7361 	BUG_ON(strlen(name) >= sizeof(dev->name));
7362 
7363 	if (txqs < 1) {
7364 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
7365 		return NULL;
7366 	}
7367 
7368 #ifdef CONFIG_SYSFS
7369 	if (rxqs < 1) {
7370 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
7371 		return NULL;
7372 	}
7373 #endif
7374 
7375 	alloc_size = sizeof(struct net_device);
7376 	if (sizeof_priv) {
7377 		/* ensure 32-byte alignment of private area */
7378 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
7379 		alloc_size += sizeof_priv;
7380 	}
7381 	/* ensure 32-byte alignment of whole construct */
7382 	alloc_size += NETDEV_ALIGN - 1;
7383 
7384 	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7385 	if (!p)
7386 		p = vzalloc(alloc_size);
7387 	if (!p)
7388 		return NULL;
7389 
7390 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
7391 	dev->padded = (char *)dev - (char *)p;
7392 
7393 	dev->pcpu_refcnt = alloc_percpu(int);
7394 	if (!dev->pcpu_refcnt)
7395 		goto free_dev;
7396 
7397 	if (dev_addr_init(dev))
7398 		goto free_pcpu;
7399 
7400 	dev_mc_init(dev);
7401 	dev_uc_init(dev);
7402 
7403 	dev_net_set(dev, &init_net);
7404 
7405 	dev->gso_max_size = GSO_MAX_SIZE;
7406 	dev->gso_max_segs = GSO_MAX_SEGS;
7407 	dev->gso_min_segs = 0;
7408 
7409 	INIT_LIST_HEAD(&dev->napi_list);
7410 	INIT_LIST_HEAD(&dev->unreg_list);
7411 	INIT_LIST_HEAD(&dev->close_list);
7412 	INIT_LIST_HEAD(&dev->link_watch_list);
7413 	INIT_LIST_HEAD(&dev->adj_list.upper);
7414 	INIT_LIST_HEAD(&dev->adj_list.lower);
7415 	INIT_LIST_HEAD(&dev->all_adj_list.upper);
7416 	INIT_LIST_HEAD(&dev->all_adj_list.lower);
7417 	INIT_LIST_HEAD(&dev->ptype_all);
7418 	INIT_LIST_HEAD(&dev->ptype_specific);
7419 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
7420 	setup(dev);
7421 
7422 	if (!dev->tx_queue_len)
7423 		dev->priv_flags |= IFF_NO_QUEUE;
7424 
7425 	dev->num_tx_queues = txqs;
7426 	dev->real_num_tx_queues = txqs;
7427 	if (netif_alloc_netdev_queues(dev))
7428 		goto free_all;
7429 
7430 #ifdef CONFIG_SYSFS
7431 	dev->num_rx_queues = rxqs;
7432 	dev->real_num_rx_queues = rxqs;
7433 	if (netif_alloc_rx_queues(dev))
7434 		goto free_all;
7435 #endif
7436 
7437 	strcpy(dev->name, name);
7438 	dev->name_assign_type = name_assign_type;
7439 	dev->group = INIT_NETDEV_GROUP;
7440 	if (!dev->ethtool_ops)
7441 		dev->ethtool_ops = &default_ethtool_ops;
7442 
7443 	nf_hook_ingress_init(dev);
7444 
7445 	return dev;
7446 
7447 free_all:
7448 	free_netdev(dev);
7449 	return NULL;
7450 
7451 free_pcpu:
7452 	free_percpu(dev->pcpu_refcnt);
7453 free_dev:
7454 	netdev_freemem(dev);
7455 	return NULL;
7456 }
7457 EXPORT_SYMBOL(alloc_netdev_mqs);
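/*
 * Illustrative sketch (assumption): allocating a multiqueue Ethernet-style
 * device with a private area.  struct example_priv, the "example%d" name
 * template and the 4/4 queue counts are made up; ether_setup() is the
 * standard setup callback for Ethernet-like devices.  The caller would later
 * register the device with register_netdev() and release it with
 * free_netdev().
 */
struct example_priv {
	int dummy;
};

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
				NET_NAME_UNKNOWN, ether_setup, 4, 4);
}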
7458 
7459 /**
7460  *	free_netdev - free network device
7461  *	@dev: device
7462  *
7463  *	This function does the last stage of destroying an allocated device
7464  * 	interface. The reference to the device object is released.
7465  *	If this is the last reference then it will be freed.
7466  *	Must be called in process context.
7467  */
7468 void free_netdev(struct net_device *dev)
7469 {
7470 	struct napi_struct *p, *n;
7471 
7472 	might_sleep();
7473 	netif_free_tx_queues(dev);
7474 #ifdef CONFIG_SYSFS
7475 	kvfree(dev->_rx);
7476 #endif
7477 
7478 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
7479 
7480 	/* Flush device addresses */
7481 	dev_addr_flush(dev);
7482 
7483 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7484 		netif_napi_del(p);
7485 
7486 	free_percpu(dev->pcpu_refcnt);
7487 	dev->pcpu_refcnt = NULL;
7488 
7489 	/*  Compatibility with error handling in drivers */
7490 	if (dev->reg_state == NETREG_UNINITIALIZED) {
7491 		netdev_freemem(dev);
7492 		return;
7493 	}
7494 
7495 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7496 	dev->reg_state = NETREG_RELEASED;
7497 
7498 	/* will free via device release */
7499 	put_device(&dev->dev);
7500 }
7501 EXPORT_SYMBOL(free_netdev);
7502 
7503 /**
7504  *	synchronize_net -  Synchronize with packet receive processing
7505  *
7506  *	Wait for packets currently being received to be done.
7507  *	Does not block later packets from starting.
7508  */
7509 void synchronize_net(void)
7510 {
7511 	might_sleep();
7512 	if (rtnl_is_locked())
7513 		synchronize_rcu_expedited();
7514 	else
7515 		synchronize_rcu();
7516 }
7517 EXPORT_SYMBOL(synchronize_net);
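
/* Example (illustrative sketch only): a hypothetical module unpublishing an
 * RCU-protected hook that the receive path dereferences under
 * rcu_read_lock(), then waiting for in-flight receivers before freeing the
 * old context.  foo_hook and struct foo_ctx are assumed names.
 */
#if 0
static void foo_teardown_example(struct foo_ctx *old_ctx)
{
	rcu_assign_pointer(foo_hook, NULL);
	synchronize_net();	/* no receive path can still see old_ctx */
	kfree(old_ctx);
}
#endif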
7518 
7519 /**
7520  *	unregister_netdevice_queue - remove device from the kernel
7521  *	@dev: device
7522  *	@head: list
7523  *
7524  *	This function shuts down a device interface and removes it
7525  *	from the kernel tables.
7526  *	If head is not NULL, the device is queued to be unregistered later.
7527  *
7528  *	Callers must hold the rtnl semaphore.  You may want
7529  *	unregister_netdev() instead of this.
7530  */
7531 
7532 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
7533 {
7534 	ASSERT_RTNL();
7535 
7536 	if (head) {
7537 		list_move_tail(&dev->unreg_list, head);
7538 	} else {
7539 		rollback_registered(dev);
7540 		/* Finish processing unregister after unlock */
7541 		net_set_todo(dev);
7542 	}
7543 }
7544 EXPORT_SYMBOL(unregister_netdevice_queue);
7545 
7546 /**
7547  *	unregister_netdevice_many - unregister many devices
7548  *	@head: list of devices
7549  *
7550  *  Note: As most callers use a stack-allocated list_head,
7551  *  we force a list_del() to make sure the stack won't be corrupted later.
7552  */
7553 void unregister_netdevice_many(struct list_head *head)
7554 {
7555 	struct net_device *dev;
7556 
7557 	if (!list_empty(head)) {
7558 		rollback_registered_many(head);
7559 		list_for_each_entry(dev, head, unreg_list)
7560 			net_set_todo(dev);
7561 		list_del(head);
7562 	}
7563 }
7564 EXPORT_SYMBOL(unregister_netdevice_many);
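
/* Example (illustrative sketch only): batching several unregistrations
 * under a single rtnl_lock()/rtnl_unlock() cycle, in the spirit of what
 * rtnl_link dellink implementations do.  foo_ports and struct foo_port
 * are assumed names.
 */
#if 0
static void foo_remove_all_example(void)
{
	struct foo_port *port;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry(port, &foo_ports, node)
		unregister_netdevice_queue(port->dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}
#endif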
7565 
7566 /**
7567  *	unregister_netdev - remove device from the kernel
7568  *	@dev: device
7569  *
7570  *	This function shuts down a device interface and removes it
7571  *	from the kernel tables.
7572  *
7573  *	This is just a wrapper for unregister_netdevice that takes
7574  *	the rtnl semaphore.  In general you want to use this and not
7575  *	unregister_netdevice.
7576  */
7577 void unregister_netdev(struct net_device *dev)
7578 {
7579 	rtnl_lock();
7580 	unregister_netdevice(dev);
7581 	rtnl_unlock();
7582 }
7583 EXPORT_SYMBOL(unregister_netdev);
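
/* Example (illustrative sketch only): a hypothetical single-device driver's
 * module exit path.  unregister_netdev() takes the rtnl semaphore itself;
 * free_netdev() then drops the last reference.  foo_dev is an assumed
 * module-global device pointer.
 */
#if 0
static void __exit foo_exit_example(void)
{
	unregister_netdev(foo_dev);
	free_netdev(foo_dev);
}
#endif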
7584 
7585 /**
7586  *	dev_change_net_namespace - move device to a different network namespace
7587  *	@dev: device
7588  *	@net: network namespace
7589  *	@pat: If not NULL name pattern to try if the current device name
7590  *	      is already taken in the destination network namespace.
7591  *
7592  *	This function shuts down a device interface and moves it
7593  *	to a new network namespace. On success 0 is returned, on
7594  *	failure a negative errno code is returned.
7595  *
7596  *	Callers must hold the rtnl semaphore.
7597  */
7598 
7599 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7600 {
7601 	int err;
7602 
7603 	ASSERT_RTNL();
7604 
7605 	/* Don't allow namespace local devices to be moved. */
7606 	err = -EINVAL;
7607 	if (dev->features & NETIF_F_NETNS_LOCAL)
7608 		goto out;
7609 
7610 	/* Ensure the device has been registered */
7611 	if (dev->reg_state != NETREG_REGISTERED)
7612 		goto out;
7613 
7614 	/* Get out if there is nothing to do */
7615 	err = 0;
7616 	if (net_eq(dev_net(dev), net))
7617 		goto out;
7618 
7619 	/* Pick the destination device name, and ensure
7620 	 * we can use it in the destination network namespace.
7621 	 */
7622 	err = -EEXIST;
7623 	if (__dev_get_by_name(net, dev->name)) {
7624 		/* We get here if we can't use the current device name */
7625 		if (!pat)
7626 			goto out;
7627 		if (dev_get_valid_name(net, dev, pat) < 0)
7628 			goto out;
7629 	}
7630 
7631 	/*
7632 	 * And now a mini version of register_netdevice and unregister_netdevice.
7633 	 */
7634 
7635 	/* If device is running close it first. */
7636 	dev_close(dev);
7637 
7638 	/* And unlink it from device chain */
7639 	err = -ENODEV;
7640 	unlist_netdevice(dev);
7641 
7642 	synchronize_net();
7643 
7644 	/* Shutdown queueing discipline. */
7645 	dev_shutdown(dev);
7646 
7647 	/* Notify protocols that we are about to destroy
7648 	   this device. They should clean up all of their state.
7649 
7650 	   Note that dev->reg_state stays at NETREG_REGISTERED.
7651 	   This is intentional: this way 8021q and macvlan know
7652 	   the device is just moving and can keep their slaves up.
7653 	*/
7654 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7655 	rcu_barrier();
7656 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7657 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
7658 
7659 	/*
7660 	 *	Flush the unicast and multicast chains
7661 	 */
7662 	dev_uc_flush(dev);
7663 	dev_mc_flush(dev);
7664 
7665 	/* Send a netdev-removed uevent to the old namespace */
7666 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
7667 	netdev_adjacent_del_links(dev);
7668 
7669 	/* Actually switch the network namespace */
7670 	dev_net_set(dev, net);
7671 
7672 	/* If there is an ifindex conflict assign a new one */
7673 	if (__dev_get_by_index(net, dev->ifindex))
7674 		dev->ifindex = dev_new_index(net);
7675 
7676 	/* Send a netdev-add uevent to the new namespace */
7677 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7678 	netdev_adjacent_add_links(dev);
7679 
7680 	/* Fixup kobjects */
7681 	err = device_rename(&dev->dev, dev->name);
7682 	WARN_ON(err);
7683 
7684 	/* Add the device back in the hashes */
7685 	list_netdevice(dev);
7686 
7687 	/* Notify protocols, that a new device appeared. */
7688 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
7689 
7690 	/*
7691 	 *	Prevent userspace races by waiting until the network
7692 	 *	device is fully setup before sending notifications.
7693 	 */
7694 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7695 
7696 	synchronize_net();
7697 	err = 0;
7698 out:
7699 	return err;
7700 }
7701 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
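
/* Example (illustrative sketch only): moving a device into a namespace
 * identified by a file descriptor, roughly the shape of the RTM_SETLINK
 * handling.  netns_fd is an assumed parameter; "eth%d" is the fallback
 * name pattern used if dev->name is already taken in the target namespace.
 */
#if 0
static int foo_move_example(struct net_device *dev, int netns_fd)
{
	struct net *net;
	int err;

	net = get_net_ns_by_fd(netns_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();

	put_net(net);
	return err;
}
#endif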
7702 
7703 static int dev_cpu_callback(struct notifier_block *nfb,
7704 			    unsigned long action,
7705 			    void *ocpu)
7706 {
7707 	struct sk_buff **list_skb;
7708 	struct sk_buff *skb;
7709 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
7710 	struct softnet_data *sd, *oldsd;
7711 
7712 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7713 		return NOTIFY_OK;
7714 
7715 	local_irq_disable();
7716 	cpu = smp_processor_id();
7717 	sd = &per_cpu(softnet_data, cpu);
7718 	oldsd = &per_cpu(softnet_data, oldcpu);
7719 
7720 	/* Find end of our completion_queue. */
7721 	list_skb = &sd->completion_queue;
7722 	while (*list_skb)
7723 		list_skb = &(*list_skb)->next;
7724 	/* Append completion queue from offline CPU. */
7725 	*list_skb = oldsd->completion_queue;
7726 	oldsd->completion_queue = NULL;
7727 
7728 	/* Append output queue from offline CPU. */
7729 	if (oldsd->output_queue) {
7730 		*sd->output_queue_tailp = oldsd->output_queue;
7731 		sd->output_queue_tailp = oldsd->output_queue_tailp;
7732 		oldsd->output_queue = NULL;
7733 		oldsd->output_queue_tailp = &oldsd->output_queue;
7734 	}
7735 	/* Append NAPI poll list from offline CPU, with one exception:
7736 	 * process_backlog() must be called by the CPU owning the per-CPU backlog.
7737 	 * We properly handle process_queue & input_pkt_queue later.
7738 	 */
7739 	while (!list_empty(&oldsd->poll_list)) {
7740 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7741 							    struct napi_struct,
7742 							    poll_list);
7743 
7744 		list_del_init(&napi->poll_list);
7745 		if (napi->poll == process_backlog)
7746 			napi->state = 0;
7747 		else
7748 			____napi_schedule(sd, napi);
7749 	}
7750 
7751 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
7752 	local_irq_enable();
7753 
7754 	/* Process offline CPU's input_pkt_queue */
7755 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7756 		netif_rx_ni(skb);
7757 		input_queue_head_incr(oldsd);
7758 	}
7759 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7760 		netif_rx_ni(skb);
7761 		input_queue_head_incr(oldsd);
7762 	}
7763 
7764 	return NOTIFY_OK;
7765 }
7766 
7767 
7768 /**
7769  *	netdev_increment_features - increment feature set by one
7770  *	@all: current feature set
7771  *	@one: new feature set
7772  *	@mask: mask feature set
7773  *
7774  *	Computes a new feature set after adding a device with feature set
7775  *	@one to the master device with current feature set @all.  Will not
7776  *	enable anything that is off in @mask. Returns the new feature set.
7777  */
7778 netdev_features_t netdev_increment_features(netdev_features_t all,
7779 	netdev_features_t one, netdev_features_t mask)
7780 {
7781 	if (mask & NETIF_F_HW_CSUM)
7782 		mask |= NETIF_F_CSUM_MASK;
7783 	mask |= NETIF_F_VLAN_CHALLENGED;
7784 
7785 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
7786 	all &= one | ~NETIF_F_ALL_FOR_ALL;
7787 
7788 	/* If one device supports hw checksumming, set for all. */
7789 	if (all & NETIF_F_HW_CSUM)
7790 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
7791 
7792 	return all;
7793 }
7794 EXPORT_SYMBOL(netdev_increment_features);
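
/* Example (illustrative sketch only): how an aggregating driver (in the
 * spirit of bonding or bridging) might fold each lower device's feature set
 * into the master's.  foo_lowers, struct foo_lower and FOO_MASTER_FEATURES
 * are assumed names; the accumulator typically starts from the candidate
 * set restricted to NETIF_F_ALL_FOR_ALL before iterating.
 */
#if 0
static netdev_features_t foo_compute_features_example(void)
{
	netdev_features_t features = FOO_MASTER_FEATURES & NETIF_F_ALL_FOR_ALL;
	struct foo_lower *lower;

	list_for_each_entry(lower, &foo_lowers, node)
		features = netdev_increment_features(features,
						     lower->dev->features,
						     FOO_MASTER_FEATURES);
	return features;
}
#endif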
7795 
7796 static struct hlist_head * __net_init netdev_create_hash(void)
7797 {
7798 	int i;
7799 	struct hlist_head *hash;
7800 
7801 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7802 	if (hash != NULL)
7803 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
7804 			INIT_HLIST_HEAD(&hash[i]);
7805 
7806 	return hash;
7807 }
7808 
7809 /* Initialize per network namespace state */
7810 static int __net_init netdev_init(struct net *net)
7811 {
7812 	if (net != &init_net)
7813 		INIT_LIST_HEAD(&net->dev_base_head);
7814 
7815 	net->dev_name_head = netdev_create_hash();
7816 	if (net->dev_name_head == NULL)
7817 		goto err_name;
7818 
7819 	net->dev_index_head = netdev_create_hash();
7820 	if (net->dev_index_head == NULL)
7821 		goto err_idx;
7822 
7823 	return 0;
7824 
7825 err_idx:
7826 	kfree(net->dev_name_head);
7827 err_name:
7828 	return -ENOMEM;
7829 }
7830 
7831 /**
7832  *	netdev_drivername - network driver for the device
7833  *	@dev: network device
7834  *
7835  *	Determine network driver for device.
7836  */
7837 const char *netdev_drivername(const struct net_device *dev)
7838 {
7839 	const struct device_driver *driver;
7840 	const struct device *parent;
7841 	const char *empty = "";
7842 
7843 	parent = dev->dev.parent;
7844 	if (!parent)
7845 		return empty;
7846 
7847 	driver = parent->driver;
7848 	if (driver && driver->name)
7849 		return driver->name;
7850 	return empty;
7851 }
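
/* Example (illustrative sketch only): including the driver name in a
 * diagnostic, similar to what the transmit watchdog does.
 */
#if 0
	netdev_warn(dev, "transmit timed out (driver: %s)\n",
		    netdev_drivername(dev));
#endif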
7852 
7853 static void __netdev_printk(const char *level, const struct net_device *dev,
7854 			    struct va_format *vaf)
7855 {
7856 	if (dev && dev->dev.parent) {
7857 		dev_printk_emit(level[1] - '0',
7858 				dev->dev.parent,
7859 				"%s %s %s%s: %pV",
7860 				dev_driver_string(dev->dev.parent),
7861 				dev_name(dev->dev.parent),
7862 				netdev_name(dev), netdev_reg_state(dev),
7863 				vaf);
7864 	} else if (dev) {
7865 		printk("%s%s%s: %pV",
7866 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
7867 	} else {
7868 		printk("%s(NULL net_device): %pV", level, vaf);
7869 	}
7870 }
7871 
7872 void netdev_printk(const char *level, const struct net_device *dev,
7873 		   const char *format, ...)
7874 {
7875 	struct va_format vaf;
7876 	va_list args;
7877 
7878 	va_start(args, format);
7879 
7880 	vaf.fmt = format;
7881 	vaf.va = &args;
7882 
7883 	__netdev_printk(level, dev, &vaf);
7884 
7885 	va_end(args);
7886 }
7887 EXPORT_SYMBOL(netdev_printk);
7888 
7889 #define define_netdev_printk_level(func, level)			\
7890 void func(const struct net_device *dev, const char *fmt, ...)	\
7891 {								\
7892 	struct va_format vaf;					\
7893 	va_list args;						\
7894 								\
7895 	va_start(args, fmt);					\
7896 								\
7897 	vaf.fmt = fmt;						\
7898 	vaf.va = &args;						\
7899 								\
7900 	__netdev_printk(level, dev, &vaf);			\
7901 								\
7902 	va_end(args);						\
7903 }								\
7904 EXPORT_SYMBOL(func);
7905 
7906 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7907 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7908 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7909 define_netdev_printk_level(netdev_err, KERN_ERR);
7910 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7911 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7912 define_netdev_printk_level(netdev_info, KERN_INFO);
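
/* Example usage (illustrative sketch only): the per-level helpers take a
 * net_device and prefix the message with the driver, bus and interface
 * names via __netdev_printk().  The link speed shown is an assumed value.
 */
#if 0
	netdev_info(dev, "link up, %u Mbps, full duplex\n", 1000);
	netdev_err(dev, "failed to allocate RX ring\n");
#endif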
7913 
7914 static void __net_exit netdev_exit(struct net *net)
7915 {
7916 	kfree(net->dev_name_head);
7917 	kfree(net->dev_index_head);
7918 }
7919 
7920 static struct pernet_operations __net_initdata netdev_net_ops = {
7921 	.init = netdev_init,
7922 	.exit = netdev_exit,
7923 };
7924 
7925 static void __net_exit default_device_exit(struct net *net)
7926 {
7927 	struct net_device *dev, *aux;
7928 	/*
7929 	 * Push all migratable network devices back to the
7930 	 * initial network namespace
7931 	 */
7932 	rtnl_lock();
7933 	for_each_netdev_safe(net, dev, aux) {
7934 		int err;
7935 		char fb_name[IFNAMSIZ];
7936 
7937 		/* Ignore unmovable devices (e.g. loopback) */
7938 		if (dev->features & NETIF_F_NETNS_LOCAL)
7939 			continue;
7940 
7941 		/* Leave virtual devices for the generic cleanup */
7942 		if (dev->rtnl_link_ops)
7943 			continue;
7944 
7945 		/* Push remaining network devices to init_net */
7946 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7947 		err = dev_change_net_namespace(dev, &init_net, fb_name);
7948 		if (err) {
7949 			pr_emerg("%s: failed to move %s to init_net: %d\n",
7950 				 __func__, dev->name, err);
7951 			BUG();
7952 		}
7953 	}
7954 	rtnl_unlock();
7955 }
7956 
7957 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7958 {
7959 	/* Return with the rtnl_lock held when there are no network
7960 	 * devices unregistering in any network namespace in net_list.
7961 	 */
7962 	struct net *net;
7963 	bool unregistering;
7964 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
7965 
7966 	add_wait_queue(&netdev_unregistering_wq, &wait);
7967 	for (;;) {
7968 		unregistering = false;
7969 		rtnl_lock();
7970 		list_for_each_entry(net, net_list, exit_list) {
7971 			if (net->dev_unreg_count > 0) {
7972 				unregistering = true;
7973 				break;
7974 			}
7975 		}
7976 		if (!unregistering)
7977 			break;
7978 		__rtnl_unlock();
7979 
7980 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
7981 	}
7982 	remove_wait_queue(&netdev_unregistering_wq, &wait);
7983 }
7984 
7985 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7986 {
7987 	/* At exit all network devices must be removed from a network
7988 	 * namespace.  Do this in the reverse order of registration.
7989 	 * Do this across as many network namespaces as possible to
7990 	 * improve batching efficiency.
7991 	 */
7992 	struct net_device *dev;
7993 	struct net *net;
7994 	LIST_HEAD(dev_kill_list);
7995 
7996 	/* To prevent network device cleanup code from dereferencing
7997 	 * loopback devices or network devices that have been freed,
7998 	 * wait here for all pending unregistrations to complete
7999 	 * before unregistering the loopback device and allowing the
8000 	 * network namespace to be freed.
8001 	 *
8002 	 * The netdev todo list containing all network devices
8003 	 * unregistrations that happen in default_device_exit_batch
8004 	 * will run in the rtnl_unlock() at the end of
8005 	 * default_device_exit_batch.
8006 	 */
8007 	rtnl_lock_unregistering(net_list);
8008 	list_for_each_entry(net, net_list, exit_list) {
8009 		for_each_netdev_reverse(net, dev) {
8010 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
8011 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8012 			else
8013 				unregister_netdevice_queue(dev, &dev_kill_list);
8014 		}
8015 	}
8016 	unregister_netdevice_many(&dev_kill_list);
8017 	rtnl_unlock();
8018 }
8019 
8020 static struct pernet_operations __net_initdata default_device_ops = {
8021 	.exit = default_device_exit,
8022 	.exit_batch = default_device_exit_batch,
8023 };
8024 
8025 /*
8026  *	Initialize the DEV module. At boot time this walks the device list and
8027  *	unhooks any devices that fail to initialise (normally hardware not
8028  *	present) and leaves us with a valid list of present and active devices.
8029  *
8030  */
8031 
8032 /*
8033  *       This is called single threaded during boot, so no need
8034  *       to take the rtnl semaphore.
8035  */
8036 static int __init net_dev_init(void)
8037 {
8038 	int i, rc = -ENOMEM;
8039 
8040 	BUG_ON(!dev_boot_phase);
8041 
8042 	if (dev_proc_init())
8043 		goto out;
8044 
8045 	if (netdev_kobject_init())
8046 		goto out;
8047 
8048 	INIT_LIST_HEAD(&ptype_all);
8049 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
8050 		INIT_LIST_HEAD(&ptype_base[i]);
8051 
8052 	INIT_LIST_HEAD(&offload_base);
8053 
8054 	if (register_pernet_subsys(&netdev_net_ops))
8055 		goto out;
8056 
8057 	/*
8058 	 *	Initialise the packet receive queues.
8059 	 */
8060 
8061 	for_each_possible_cpu(i) {
8062 		struct softnet_data *sd = &per_cpu(softnet_data, i);
8063 
8064 		skb_queue_head_init(&sd->input_pkt_queue);
8065 		skb_queue_head_init(&sd->process_queue);
8066 		INIT_LIST_HEAD(&sd->poll_list);
8067 		sd->output_queue_tailp = &sd->output_queue;
8068 #ifdef CONFIG_RPS
8069 		sd->csd.func = rps_trigger_softirq;
8070 		sd->csd.info = sd;
8071 		sd->cpu = i;
8072 #endif
8073 
8074 		sd->backlog.poll = process_backlog;
8075 		sd->backlog.weight = weight_p;
8076 	}
8077 
8078 	dev_boot_phase = 0;
8079 
8080 	/* The loopback device is special: if any other network device
8081 	 * is present in a network namespace, the loopback device must
8082 	 * be present too. Since we now dynamically allocate and free the
8083 	 * loopback device, ensure this invariant is maintained by
8084 	 * keeping the loopback device as the first device on the
8085 	 * list of network devices.  This makes the loopback device
8086 	 * the first device that appears and the last network device
8087 	 * that disappears.
8088 	 */
8089 	if (register_pernet_device(&loopback_net_ops))
8090 		goto out;
8091 
8092 	if (register_pernet_device(&default_device_ops))
8093 		goto out;
8094 
8095 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8096 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
8097 
8098 	hotcpu_notifier(dev_cpu_callback, 0);
8099 	dst_subsys_init();
8100 	rc = 0;
8101 out:
8102 	return rc;
8103 }
8104 
8105 subsys_initcall(net_dev_init);
8106