xref: /openbmc/linux/net/core/dev.c (revision 711aab1d)
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  *	Derived from the non-IP parts of dev.c 1.0.19
10  *              Authors:	Ross Biro
11  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *	Additional Authors:
15  *		Florian la Roche <rzsfl@rz.uni-sb.de>
16  *		Alan Cox <gw4pts@gw4pts.ampr.org>
17  *		David Hinds <dahinds@users.sourceforge.net>
18  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *		Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *	Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *		Alan Cox	:	device private ioctl copies fields back.
28  *		Alan Cox	:	Transmit queue code does relevant
29  *					stunts to keep the queue safe.
30  *		Alan Cox	:	Fixed double lock.
31  *		Alan Cox	:	Fixed promisc NULL pointer trap
32  *		????????	:	Support the full private ioctl range
33  *		Alan Cox	:	Moved ioctl permission check into
34  *					drivers
35  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
36  *		Alan Cox	:	100 backlog just doesn't cut it when
37  *					you start doing multicast video 8)
38  *		Alan Cox	:	Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *		Alan Cox	:	Took out transmit every packet pass
41  *					Saved a few bytes in the ioctl handler
42  *		Alan Cox	:	Network driver sets packet type before
43  *					calling netif_rx. Saves a function
44  *					call a packet.
45  *		Alan Cox	:	Hashed net_bh()
46  *		Richard Kooijman:	Timestamp fixes.
47  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
48  *		Alan Cox	:	Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *					changes.
51  *		Rudi Cilibrasi	:	Pass the right thing to
52  *					set_mac_address()
53  *		Dave Miller	:	32bit quantity for the device lock to
54  *					make it work out on a Sparc.
55  *		Bjorn Ekwall	:	Added KERNELD hack.
56  *		Alan Cox	:	Cleaned up the backlog initialise.
57  *		Craig Metz	:	SIOCGIFCONF fix if space for under
58  *					1 device.
59  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
60  *					is no device open function.
61  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
62  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
63  *		Cyrus Durgin	:	Cleaned for KMOD
64  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
65  *					A network device unload needs to purge
66  *					the backlog queue.
67  *	Paul Rusty Russell	:	SIOCSIFNAME
68  *              Pekka Riikonen  :	Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *				        - netif_rx() feedback
73  */
74 
75 #include <linux/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/sched/mm.h>
85 #include <linux/mutex.h>
86 #include <linux/string.h>
87 #include <linux/mm.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/errno.h>
91 #include <linux/interrupt.h>
92 #include <linux/if_ether.h>
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/ethtool.h>
96 #include <linux/notifier.h>
97 #include <linux/skbuff.h>
98 #include <linux/bpf.h>
99 #include <linux/bpf_trace.h>
100 #include <net/net_namespace.h>
101 #include <net/sock.h>
102 #include <net/busy_poll.h>
103 #include <linux/rtnetlink.h>
104 #include <linux/stat.h>
105 #include <net/dst.h>
106 #include <net/dst_metadata.h>
107 #include <net/pkt_sched.h>
108 #include <net/pkt_cls.h>
109 #include <net/checksum.h>
110 #include <net/xfrm.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
126 #include <net/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <linux/pci.h>
136 #include <linux/inetdevice.h>
137 #include <linux/cpu_rmap.h>
138 #include <linux/static_key.h>
139 #include <linux/hashtable.h>
140 #include <linux/vmalloc.h>
141 #include <linux/if_macvlan.h>
142 #include <linux/errqueue.h>
143 #include <linux/hrtimer.h>
144 #include <linux/netfilter_ingress.h>
145 #include <linux/crash_dump.h>
146 #include <linux/sctp.h>
147 #include <net/udp_tunnel.h>
148 
149 #include "net-sysfs.h"
150 
151 /* Instead of increasing this, you should create a hash table. */
152 #define MAX_GRO_SKBS 8
153 
154 /* This should be increased if a protocol with a bigger head is added. */
155 #define GRO_MAX_HEAD (MAX_HEADER + 128)
156 
157 static DEFINE_SPINLOCK(ptype_lock);
158 static DEFINE_SPINLOCK(offload_lock);
159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
160 struct list_head ptype_all __read_mostly;	/* Taps */
161 static struct list_head offload_base __read_mostly;
162 
163 static int netif_rx_internal(struct sk_buff *skb);
164 static int call_netdevice_notifiers_info(unsigned long val,
165 					 struct net_device *dev,
166 					 struct netdev_notifier_info *info);
167 static struct napi_struct *napi_by_id(unsigned int napi_id);
168 
169 /*
170  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
171  * semaphore.
172  *
173  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
174  *
175  * Writers must hold the rtnl semaphore while they loop through the
176  * dev_base_head list, and hold dev_base_lock for writing when they do the
177  * actual updates.  This allows pure readers to access the list even
178  * while a writer is preparing to update it.
179  *
180  * To put it another way, dev_base_lock is held for writing only to
181  * protect against pure readers; the rtnl semaphore provides the
182  * protection against other writers.
183  *
184  * For example usages, see register_netdevice() and
185  * unregister_netdevice(), which must be called with the rtnl
186  * semaphore held.
187  */
188 DEFINE_RWLOCK(dev_base_lock);
189 EXPORT_SYMBOL(dev_base_lock);
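
/*
 * Example (editor's sketch, not part of the original file): the two
 * pure-reader patterns described above, counting registered devices in
 * init_net.  "count" and "dev" are local illustrative variables.
 *
 *	struct net_device *dev;
 *	int count = 0;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		count++;
 *	rcu_read_unlock();
 *
 * or, for readers that cannot use RCU:
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(&init_net, dev)
 *		count++;
 *	read_unlock(&dev_base_lock);
 *
 * Writers take rtnl_lock() and additionally hold dev_base_lock for writing
 * while updating the list, as list_netdevice()/unlist_netdevice() below do.
 */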
190 
191 /* protects napi_hash addition/deletion and napi_gen_id */
192 static DEFINE_SPINLOCK(napi_hash_lock);
193 
194 static unsigned int napi_gen_id = NR_CPUS;
195 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
196 
197 static seqcount_t devnet_rename_seq;
198 
199 static inline void dev_base_seq_inc(struct net *net)
200 {
201 	while (++net->dev_base_seq == 0)
202 		;
203 }
204 
205 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
206 {
207 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
208 
209 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
210 }
211 
212 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
213 {
214 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
215 }
216 
217 static inline void rps_lock(struct softnet_data *sd)
218 {
219 #ifdef CONFIG_RPS
220 	spin_lock(&sd->input_pkt_queue.lock);
221 #endif
222 }
223 
224 static inline void rps_unlock(struct softnet_data *sd)
225 {
226 #ifdef CONFIG_RPS
227 	spin_unlock(&sd->input_pkt_queue.lock);
228 #endif
229 }
230 
231 /* Device list insertion */
232 static void list_netdevice(struct net_device *dev)
233 {
234 	struct net *net = dev_net(dev);
235 
236 	ASSERT_RTNL();
237 
238 	write_lock_bh(&dev_base_lock);
239 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
240 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
241 	hlist_add_head_rcu(&dev->index_hlist,
242 			   dev_index_hash(net, dev->ifindex));
243 	write_unlock_bh(&dev_base_lock);
244 
245 	dev_base_seq_inc(net);
246 }
247 
248 /* Device list removal
249  * the caller must respect an RCU grace period before freeing/reusing dev
250  */
251 static void unlist_netdevice(struct net_device *dev)
252 {
253 	ASSERT_RTNL();
254 
255 	/* Unlink dev from the device chain */
256 	write_lock_bh(&dev_base_lock);
257 	list_del_rcu(&dev->dev_list);
258 	hlist_del_rcu(&dev->name_hlist);
259 	hlist_del_rcu(&dev->index_hlist);
260 	write_unlock_bh(&dev_base_lock);
261 
262 	dev_base_seq_inc(dev_net(dev));
263 }
264 
265 /*
266  *	Our notifier list
267  */
268 
269 static RAW_NOTIFIER_HEAD(netdev_chain);
270 
271 /*
272  *	Device drivers call our routines to queue packets here. We empty the
273  *	queue in the local softnet handler.
274  */
275 
276 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
277 EXPORT_PER_CPU_SYMBOL(softnet_data);
278 
279 #ifdef CONFIG_LOCKDEP
280 /*
281  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
282  * according to dev->type
283  */
284 static const unsigned short netdev_lock_type[] = {
285 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
286 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
287 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
288 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
289 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
290 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
291 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
292 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
293 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
294 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
295 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
296 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
297 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
298 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
299 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
300 
301 static const char *const netdev_lock_name[] = {
302 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
303 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
304 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
305 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
306 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
307 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
308 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
309 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
310 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
311 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
312 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
313 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
314 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
315 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
316 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
317 
318 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
319 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
320 
321 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
322 {
323 	int i;
324 
325 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
326 		if (netdev_lock_type[i] == dev_type)
327 			return i;
328 	/* the last key is used by default */
329 	return ARRAY_SIZE(netdev_lock_type) - 1;
330 }
331 
332 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
333 						 unsigned short dev_type)
334 {
335 	int i;
336 
337 	i = netdev_lock_pos(dev_type);
338 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
339 				   netdev_lock_name[i]);
340 }
341 
342 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
343 {
344 	int i;
345 
346 	i = netdev_lock_pos(dev->type);
347 	lockdep_set_class_and_name(&dev->addr_list_lock,
348 				   &netdev_addr_lock_key[i],
349 				   netdev_lock_name[i]);
350 }
351 #else
352 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
353 						 unsigned short dev_type)
354 {
355 }
356 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
357 {
358 }
359 #endif
360 
361 /*******************************************************************************
362  *
363  *		Protocol management and registration routines
364  *
365  *******************************************************************************/
366 
367 
368 /*
369  *	Add a protocol ID to the list. Now that the input handler is
370  *	smarter we can dispense with all the messy stuff that used to be
371  *	here.
372  *
373  *	BEWARE!!! Protocol handlers that mangle input packets
374  *	MUST BE last in the hash buckets, and checking of protocol handlers
375  *	MUST start from the promiscuous ptype_all chain in net_bh.
376  *	This is true now; do not change it.
377  *	Explanation: if a packet-mangling protocol handler were
378  *	first on the list, it could not sense that the packet
379  *	is cloned and should be copied-on-write, so it would
380  *	change it and subsequent readers would get a broken packet.
381  *							--ANK (980803)
382  */
383 
384 static inline struct list_head *ptype_head(const struct packet_type *pt)
385 {
386 	if (pt->type == htons(ETH_P_ALL))
387 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
388 	else
389 		return pt->dev ? &pt->dev->ptype_specific :
390 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
391 }
392 
393 /**
394  *	dev_add_pack - add packet handler
395  *	@pt: packet type declaration
396  *
397  *	Add a protocol handler to the networking stack. The passed &packet_type
398  *	is linked into kernel lists and may not be freed until it has been
399  *	removed from the kernel lists.
400  *
401  *	This call does not sleep, therefore it cannot
402  *	guarantee that all CPUs that are in the middle of receiving packets
403  *	will see the new packet type (until the next received packet).
404  */
405 
406 void dev_add_pack(struct packet_type *pt)
407 {
408 	struct list_head *head = ptype_head(pt);
409 
410 	spin_lock(&ptype_lock);
411 	list_add_rcu(&pt->list, head);
412 	spin_unlock(&ptype_lock);
413 }
414 EXPORT_SYMBOL(dev_add_pack);
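
/*
 * Example (editor's sketch, not part of the original file): how a module
 * might register a handler for the local-experimental ethertype
 * ETH_P_802_EX1 and later remove it.  "my_rcv" and "my_pt" are hypothetical
 * names.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		consume_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_802_EX1),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);
 *
 * Setting .dev restricts delivery to one device; leaving it NULL, as here,
 * registers the handler globally (see ptype_head() above).
 */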
415 
416 /**
417  *	__dev_remove_pack	 - remove packet handler
418  *	@pt: packet type declaration
419  *
420  *	Remove a protocol handler that was previously added to the kernel
421  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
422  *	from the kernel lists and can be freed or reused once this function
423  *	returns.
424  *
425  *	The packet type might still be in use by receivers
426  *	and must not be freed until after all the CPUs have gone
427  *	through a quiescent state.
428  */
429 void __dev_remove_pack(struct packet_type *pt)
430 {
431 	struct list_head *head = ptype_head(pt);
432 	struct packet_type *pt1;
433 
434 	spin_lock(&ptype_lock);
435 
436 	list_for_each_entry(pt1, head, list) {
437 		if (pt == pt1) {
438 			list_del_rcu(&pt->list);
439 			goto out;
440 		}
441 	}
442 
443 	pr_warn("dev_remove_pack: %p not found\n", pt);
444 out:
445 	spin_unlock(&ptype_lock);
446 }
447 EXPORT_SYMBOL(__dev_remove_pack);
448 
449 /**
450  *	dev_remove_pack	 - remove packet handler
451  *	@pt: packet type declaration
452  *
453  *	Remove a protocol handler that was previously added to the kernel
454  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
455  *	from the kernel lists and can be freed or reused once this function
456  *	returns.
457  *
458  *	This call sleeps to guarantee that no CPU is looking at the packet
459  *	type after return.
460  */
461 void dev_remove_pack(struct packet_type *pt)
462 {
463 	__dev_remove_pack(pt);
464 
465 	synchronize_net();
466 }
467 EXPORT_SYMBOL(dev_remove_pack);
468 
469 
470 /**
471  *	dev_add_offload - register offload handlers
472  *	@po: protocol offload declaration
473  *
474  *	Add protocol offload handlers to the networking stack. The passed
475  *	&proto_offload is linked into kernel lists and may not be freed until
476  *	it has been removed from the kernel lists.
477  *
478  *	This call does not sleep, therefore it cannot
479  *	guarantee that all CPUs that are in the middle of receiving packets
480  *	will see the new offload handlers (until the next received packet).
481  */
482 void dev_add_offload(struct packet_offload *po)
483 {
484 	struct packet_offload *elem;
485 
486 	spin_lock(&offload_lock);
487 	list_for_each_entry(elem, &offload_base, list) {
488 		if (po->priority < elem->priority)
489 			break;
490 	}
491 	list_add_rcu(&po->list, elem->list.prev);
492 	spin_unlock(&offload_lock);
493 }
494 EXPORT_SYMBOL(dev_add_offload);
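
/*
 * Example (editor's sketch, not part of the original file): how a protocol
 * might register GRO/GSO callbacks, modelled on the ETH_P_IP offload in
 * net/ipv4/af_inet.c.  The my_* callbacks are hypothetical and would need
 * the same signatures as their inet_* counterparts.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *
 * Entries are kept sorted by ascending .priority, so a handler that must be
 * considered before an existing one for the same type uses a smaller value.
 */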
495 
496 /**
497  *	__dev_remove_offload	 - remove offload handler
498  *	@po: packet offload declaration
499  *
500  *	Remove a protocol offload handler that was previously added to the
501  *	kernel offload handlers by dev_add_offload(). The passed &offload_type
502  *	is removed from the kernel lists and can be freed or reused once this
503  *	function returns.
504  *
505  *	The packet type might still be in use by receivers
506  *	and must not be freed until after all the CPUs have gone
507  *	through a quiescent state.
508  */
509 static void __dev_remove_offload(struct packet_offload *po)
510 {
511 	struct list_head *head = &offload_base;
512 	struct packet_offload *po1;
513 
514 	spin_lock(&offload_lock);
515 
516 	list_for_each_entry(po1, head, list) {
517 		if (po == po1) {
518 			list_del_rcu(&po->list);
519 			goto out;
520 		}
521 	}
522 
523 	pr_warn("dev_remove_offload: %p not found\n", po);
524 out:
525 	spin_unlock(&offload_lock);
526 }
527 
528 /**
529  *	dev_remove_offload	 - remove packet offload handler
530  *	@po: packet offload declaration
531  *
532  *	Remove a packet offload handler that was previously added to the kernel
533  *	offload handlers by dev_add_offload(). The passed &offload_type is
534  *	removed from the kernel lists and can be freed or reused once this
535  *	function returns.
536  *
537  *	This call sleeps to guarantee that no CPU is looking at the packet
538  *	type after return.
539  */
540 void dev_remove_offload(struct packet_offload *po)
541 {
542 	__dev_remove_offload(po);
543 
544 	synchronize_net();
545 }
546 EXPORT_SYMBOL(dev_remove_offload);
547 
548 /******************************************************************************
549  *
550  *		      Device Boot-time Settings Routines
551  *
552  ******************************************************************************/
553 
554 /* Boot time configuration table */
555 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
556 
557 /**
558  *	netdev_boot_setup_add	- add new setup entry
559  *	@name: name of the device
560  *	@map: configured settings for the device
561  *
562  *	Adds a new setup entry to the dev_boot_setup list.  The function
563  *	returns 0 on error and 1 on success.  This is a generic routine for
564  *	all netdevices.
565  */
566 static int netdev_boot_setup_add(char *name, struct ifmap *map)
567 {
568 	struct netdev_boot_setup *s;
569 	int i;
570 
571 	s = dev_boot_setup;
572 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
573 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
574 			memset(s[i].name, 0, sizeof(s[i].name));
575 			strlcpy(s[i].name, name, IFNAMSIZ);
576 			memcpy(&s[i].map, map, sizeof(s[i].map));
577 			break;
578 		}
579 	}
580 
581 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
582 }
583 
584 /**
585  * netdev_boot_setup_check	- check boot time settings
586  * @dev: the netdevice
587  *
588  * Check boot time settings for the device.
589  * Any settings found are applied to the device for use
590  * later during device probing.
591  * Returns 0 if no settings are found, 1 if they are.
592  */
593 int netdev_boot_setup_check(struct net_device *dev)
594 {
595 	struct netdev_boot_setup *s = dev_boot_setup;
596 	int i;
597 
598 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
599 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
600 		    !strcmp(dev->name, s[i].name)) {
601 			dev->irq = s[i].map.irq;
602 			dev->base_addr = s[i].map.base_addr;
603 			dev->mem_start = s[i].map.mem_start;
604 			dev->mem_end = s[i].map.mem_end;
605 			return 1;
606 		}
607 	}
608 	return 0;
609 }
610 EXPORT_SYMBOL(netdev_boot_setup_check);
611 
612 
613 /**
614  * netdev_boot_base	- get address from boot time settings
615  * @prefix: prefix for network device
616  * @unit: id for network device
617  *
618  * Check boot time settings for the base address of the device.
619  * Any settings found are applied to the device for use
620  * later during device probing.
621  * Returns 0 if no settings are found.
622  */
623 unsigned long netdev_boot_base(const char *prefix, int unit)
624 {
625 	const struct netdev_boot_setup *s = dev_boot_setup;
626 	char name[IFNAMSIZ];
627 	int i;
628 
629 	sprintf(name, "%s%d", prefix, unit);
630 
631 	/*
632 	 * If device already registered then return base of 1
633 	 * to indicate not to probe for this interface
634 	 */
635 	if (__dev_get_by_name(&init_net, name))
636 		return 1;
637 
638 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
639 		if (!strcmp(name, s[i].name))
640 			return s[i].map.base_addr;
641 	return 0;
642 }
643 
644 /*
645  * Saves the settings configured at boot time for any netdevice.
646  */
647 int __init netdev_boot_setup(char *str)
648 {
649 	int ints[5];
650 	struct ifmap map;
651 
652 	str = get_options(str, ARRAY_SIZE(ints), ints);
653 	if (!str || !*str)
654 		return 0;
655 
656 	/* Save settings */
657 	memset(&map, 0, sizeof(map));
658 	if (ints[0] > 0)
659 		map.irq = ints[1];
660 	if (ints[0] > 1)
661 		map.base_addr = ints[2];
662 	if (ints[0] > 2)
663 		map.mem_start = ints[3];
664 	if (ints[0] > 3)
665 		map.mem_end = ints[4];
666 
667 	/* Add new entry to the list */
668 	return netdev_boot_setup_add(str, &map);
669 }
670 
671 __setup("netdev=", netdev_boot_setup);
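
/*
 * Example (editor's note): the traditional boot command line format consumed
 * by the parser above is
 *
 *	netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>
 *
 * e.g. "netdev=5,0x300,0,0,eth0".  get_options() consumes the leading
 * integers into ints[] and the remaining string ("eth0") becomes the entry
 * name passed to netdev_boot_setup_add().
 */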
672 
673 /*******************************************************************************
674  *
675  *			    Device Interface Subroutines
676  *
677  *******************************************************************************/
678 
679 /**
680  *	dev_get_iflink	- get 'iflink' value of an interface
681  *	@dev: targeted interface
682  *
683  *	Indicates the ifindex the interface is linked to.
684  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
685  */
686 
687 int dev_get_iflink(const struct net_device *dev)
688 {
689 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
690 		return dev->netdev_ops->ndo_get_iflink(dev);
691 
692 	return dev->ifindex;
693 }
694 EXPORT_SYMBOL(dev_get_iflink);
695 
696 /**
697  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
698  *	@dev: targeted interface
699  *	@skb: The packet.
700  *
701  *	For better visibility of tunnel traffic, OVS needs to retrieve
702  *	egress tunnel information for a packet. The following API allows
703  *	the user to get this info.
704  */
705 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
706 {
707 	struct ip_tunnel_info *info;
708 
709 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
710 		return -EINVAL;
711 
712 	info = skb_tunnel_info_unclone(skb);
713 	if (!info)
714 		return -ENOMEM;
715 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
716 		return -EINVAL;
717 
718 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
719 }
720 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
721 
722 /**
723  *	__dev_get_by_name	- find a device by its name
724  *	@net: the applicable net namespace
725  *	@name: name to find
726  *
727  *	Find an interface by name. Must be called under RTNL semaphore
728  *	or @dev_base_lock. If the name is found a pointer to the device
729  *	is returned. If the name is not found then %NULL is returned. The
730  *	reference counters are not incremented so the caller must be
731  *	careful with locks.
732  */
733 
734 struct net_device *__dev_get_by_name(struct net *net, const char *name)
735 {
736 	struct net_device *dev;
737 	struct hlist_head *head = dev_name_hash(net, name);
738 
739 	hlist_for_each_entry(dev, head, name_hlist)
740 		if (!strncmp(dev->name, name, IFNAMSIZ))
741 			return dev;
742 
743 	return NULL;
744 }
745 EXPORT_SYMBOL(__dev_get_by_name);
746 
747 /**
748  * dev_get_by_name_rcu	- find a device by its name
749  * @net: the applicable net namespace
750  * @name: name to find
751  *
752  * Find an interface by name.
753  * If the name is found a pointer to the device is returned.
754  * If the name is not found then %NULL is returned.
755  * The reference counters are not incremented so the caller must be
756  * careful with locks. The caller must hold RCU lock.
757  */
758 
759 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
760 {
761 	struct net_device *dev;
762 	struct hlist_head *head = dev_name_hash(net, name);
763 
764 	hlist_for_each_entry_rcu(dev, head, name_hlist)
765 		if (!strncmp(dev->name, name, IFNAMSIZ))
766 			return dev;
767 
768 	return NULL;
769 }
770 EXPORT_SYMBOL(dev_get_by_name_rcu);
771 
772 /**
773  *	dev_get_by_name		- find a device by its name
774  *	@net: the applicable net namespace
775  *	@name: name to find
776  *
777  *	Find an interface by name. This can be called from any
778  *	context and does its own locking. The returned handle has
779  *	the usage count incremented and the caller must use dev_put() to
780  *	release it when it is no longer needed. %NULL is returned if no
781  *	matching device is found.
782  */
783 
784 struct net_device *dev_get_by_name(struct net *net, const char *name)
785 {
786 	struct net_device *dev;
787 
788 	rcu_read_lock();
789 	dev = dev_get_by_name_rcu(net, name);
790 	if (dev)
791 		dev_hold(dev);
792 	rcu_read_unlock();
793 	return dev;
794 }
795 EXPORT_SYMBOL(dev_get_by_name);
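
/*
 * Example (editor's sketch, not part of the original file): the two common
 * name-lookup patterns.  "do_something" is a hypothetical helper.
 *
 * Short, non-sleeping use under RCU, no reference taken:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		do_something(dev);
 *	rcu_read_unlock();
 *
 * Longer-lived use with a reference that must be dropped later:
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		do_something(dev);
 *		dev_put(dev);
 *	}
 */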
796 
797 /**
798  *	__dev_get_by_index - find a device by its ifindex
799  *	@net: the applicable net namespace
800  *	@ifindex: index of device
801  *
802  *	Search for an interface by index. Returns %NULL if the device
803  *	is not found or a pointer to the device. The device has not
804  *	had its reference counter increased so the caller must be careful
805  *	about locking. The caller must hold either the RTNL semaphore
806  *	or @dev_base_lock.
807  */
808 
809 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
810 {
811 	struct net_device *dev;
812 	struct hlist_head *head = dev_index_hash(net, ifindex);
813 
814 	hlist_for_each_entry(dev, head, index_hlist)
815 		if (dev->ifindex == ifindex)
816 			return dev;
817 
818 	return NULL;
819 }
820 EXPORT_SYMBOL(__dev_get_by_index);
821 
822 /**
823  *	dev_get_by_index_rcu - find a device by its ifindex
824  *	@net: the applicable net namespace
825  *	@ifindex: index of device
826  *
827  *	Search for an interface by index. Returns %NULL if the device
828  *	is not found or a pointer to the device. The device has not
829  *	had its reference counter increased so the caller must be careful
830  *	about locking. The caller must hold RCU lock.
831  */
832 
833 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
834 {
835 	struct net_device *dev;
836 	struct hlist_head *head = dev_index_hash(net, ifindex);
837 
838 	hlist_for_each_entry_rcu(dev, head, index_hlist)
839 		if (dev->ifindex == ifindex)
840 			return dev;
841 
842 	return NULL;
843 }
844 EXPORT_SYMBOL(dev_get_by_index_rcu);
845 
846 
847 /**
848  *	dev_get_by_index - find a device by its ifindex
849  *	@net: the applicable net namespace
850  *	@ifindex: index of device
851  *
852  *	Search for an interface by index. Returns NULL if the device
853  *	is not found or a pointer to the device. The device returned has
854  *	had a reference added and the pointer is safe until the user calls
855  *	dev_put to indicate they have finished with it.
856  */
857 
858 struct net_device *dev_get_by_index(struct net *net, int ifindex)
859 {
860 	struct net_device *dev;
861 
862 	rcu_read_lock();
863 	dev = dev_get_by_index_rcu(net, ifindex);
864 	if (dev)
865 		dev_hold(dev);
866 	rcu_read_unlock();
867 	return dev;
868 }
869 EXPORT_SYMBOL(dev_get_by_index);
870 
871 /**
872  *	dev_get_by_napi_id - find a device by napi_id
873  *	@napi_id: ID of the NAPI struct
874  *
875  *	Search for an interface by NAPI ID. Returns %NULL if the device
876  *	is not found or a pointer to the device. The device has not had
877  *	its reference counter increased so the caller must be careful
878  *	about locking. The caller must hold RCU lock.
879  */
880 
881 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
882 {
883 	struct napi_struct *napi;
884 
885 	WARN_ON_ONCE(!rcu_read_lock_held());
886 
887 	if (napi_id < MIN_NAPI_ID)
888 		return NULL;
889 
890 	napi = napi_by_id(napi_id);
891 
892 	return napi ? napi->dev : NULL;
893 }
894 EXPORT_SYMBOL(dev_get_by_napi_id);
895 
896 /**
897  *	netdev_get_name - get a netdevice name, knowing its ifindex.
898  *	@net: network namespace
899  *	@name: a pointer to the buffer where the name will be stored.
900  *	@ifindex: the ifindex of the interface to get the name from.
901  *
902  *	The use of raw_seqcount_begin() and cond_resched() before
903  *	retrying is required as we want to give the writers a chance
904  *	to complete when CONFIG_PREEMPT is not set.
905  */
906 int netdev_get_name(struct net *net, char *name, int ifindex)
907 {
908 	struct net_device *dev;
909 	unsigned int seq;
910 
911 retry:
912 	seq = raw_seqcount_begin(&devnet_rename_seq);
913 	rcu_read_lock();
914 	dev = dev_get_by_index_rcu(net, ifindex);
915 	if (!dev) {
916 		rcu_read_unlock();
917 		return -ENODEV;
918 	}
919 
920 	strcpy(name, dev->name);
921 	rcu_read_unlock();
922 	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
923 		cond_resched();
924 		goto retry;
925 	}
926 
927 	return 0;
928 }
929 
930 /**
931  *	dev_getbyhwaddr_rcu - find a device by its hardware address
932  *	@net: the applicable net namespace
933  *	@type: media type of device
934  *	@ha: hardware address
935  *
936  *	Search for an interface by MAC address. Returns NULL if the device
937  *	is not found or a pointer to the device.
938  *	The caller must hold RCU or RTNL.
939  *	The returned device has not had its ref count increased
940  *	and the caller must therefore be careful about locking
941  *
942  */
943 
944 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
945 				       const char *ha)
946 {
947 	struct net_device *dev;
948 
949 	for_each_netdev_rcu(net, dev)
950 		if (dev->type == type &&
951 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
952 			return dev;
953 
954 	return NULL;
955 }
956 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
957 
958 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
959 {
960 	struct net_device *dev;
961 
962 	ASSERT_RTNL();
963 	for_each_netdev(net, dev)
964 		if (dev->type == type)
965 			return dev;
966 
967 	return NULL;
968 }
969 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
970 
971 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
972 {
973 	struct net_device *dev, *ret = NULL;
974 
975 	rcu_read_lock();
976 	for_each_netdev_rcu(net, dev)
977 		if (dev->type == type) {
978 			dev_hold(dev);
979 			ret = dev;
980 			break;
981 		}
982 	rcu_read_unlock();
983 	return ret;
984 }
985 EXPORT_SYMBOL(dev_getfirstbyhwtype);
986 
987 /**
988  *	__dev_get_by_flags - find any device with given flags
989  *	@net: the applicable net namespace
990  *	@if_flags: IFF_* values
991  *	@mask: bitmask of bits in if_flags to check
992  *
993  *	Search for any interface with the given flags. Returns NULL if a device
994  *	is not found or a pointer to the device. Must be called inside
995  *	rtnl_lock(), and result refcount is unchanged.
996  */
997 
998 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
999 				      unsigned short mask)
1000 {
1001 	struct net_device *dev, *ret;
1002 
1003 	ASSERT_RTNL();
1004 
1005 	ret = NULL;
1006 	for_each_netdev(net, dev) {
1007 		if (((dev->flags ^ if_flags) & mask) == 0) {
1008 			ret = dev;
1009 			break;
1010 		}
1011 	}
1012 	return ret;
1013 }
1014 EXPORT_SYMBOL(__dev_get_by_flags);
1015 
1016 /**
1017  *	dev_valid_name - check if name is okay for network device
1018  *	@name: name string
1019  *
1020  *	Network device names need to be valid file names to
1021  *	allow sysfs to work.  We also disallow any kind of
1022  *	whitespace.
1023  */
1024 bool dev_valid_name(const char *name)
1025 {
1026 	if (*name == '\0')
1027 		return false;
1028 	if (strlen(name) >= IFNAMSIZ)
1029 		return false;
1030 	if (!strcmp(name, ".") || !strcmp(name, ".."))
1031 		return false;
1032 
1033 	while (*name) {
1034 		if (*name == '/' || *name == ':' || isspace(*name))
1035 			return false;
1036 		name++;
1037 	}
1038 	return true;
1039 }
1040 EXPORT_SYMBOL(dev_valid_name);
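
/*
 * Editor's note, example inputs: "eth0", "br-lan" and "veth0.100" are
 * accepted; the empty string, ".", "..", anything containing '/', ':' or
 * whitespace, and names of IFNAMSIZ (16) bytes or longer are rejected.
 */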
1041 
1042 /**
1043  *	__dev_alloc_name - allocate a name for a device
1044  *	@net: network namespace to allocate the device name in
1045  *	@name: name format string
1046  *	@buf:  scratch buffer and result name string
1047  *
1048  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1049  *	id. It scans list of devices to build up a free map, then chooses
1050  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1051  *	while allocating the name and adding the device in order to avoid
1052  *	duplicates.
1053  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1054  *	Returns the number of the unit assigned or a negative errno code.
1055  */
1056 
1057 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1058 {
1059 	int i = 0;
1060 	const char *p;
1061 	const int max_netdevices = 8*PAGE_SIZE;
1062 	unsigned long *inuse;
1063 	struct net_device *d;
1064 
1065 	p = strnchr(name, IFNAMSIZ-1, '%');
1066 	if (p) {
1067 		/*
1068 		 * Verify the string as this thing may have come from
1069 		 * the user.  There must be either one "%d" and no other "%"
1070 		 * characters.
1071 		 */
1072 		if (p[1] != 'd' || strchr(p + 2, '%'))
1073 			return -EINVAL;
1074 
1075 		/* Use one page as a bit array of possible slots */
1076 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1077 		if (!inuse)
1078 			return -ENOMEM;
1079 
1080 		for_each_netdev(net, d) {
1081 			if (!sscanf(d->name, name, &i))
1082 				continue;
1083 			if (i < 0 || i >= max_netdevices)
1084 				continue;
1085 
1086 			/*  avoid cases where sscanf is not exact inverse of printf */
1087 			snprintf(buf, IFNAMSIZ, name, i);
1088 			if (!strncmp(buf, d->name, IFNAMSIZ))
1089 				set_bit(i, inuse);
1090 		}
1091 
1092 		i = find_first_zero_bit(inuse, max_netdevices);
1093 		free_page((unsigned long) inuse);
1094 	}
1095 
1096 	if (buf != name)
1097 		snprintf(buf, IFNAMSIZ, name, i);
1098 	if (!__dev_get_by_name(net, buf))
1099 		return i;
1100 
1101 	/* It is possible to run out of possible slots
1102 	 * when the name is long and there isn't enough space left
1103 	 * for the digits, or if all bits are used.
1104 	 */
1105 	return -ENFILE;
1106 }
1107 
1108 /**
1109  *	dev_alloc_name - allocate a name for a device
1110  *	@dev: device
1111  *	@name: name format string
1112  *
1113  *	Passed a format string - eg "lt%d" - it will try to find a suitable
1114  *	id. It scans list of devices to build up a free map, then chooses
1115  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1116  *	while allocating the name and adding the device in order to avoid
1117  *	duplicates.
1118  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1119  *	Returns the number of the unit assigned or a negative errno code.
1120  */
1121 
1122 int dev_alloc_name(struct net_device *dev, const char *name)
1123 {
1124 	char buf[IFNAMSIZ];
1125 	struct net *net;
1126 	int ret;
1127 
1128 	BUG_ON(!dev_net(dev));
1129 	net = dev_net(dev);
1130 	ret = __dev_alloc_name(net, name, buf);
1131 	if (ret >= 0)
1132 		strlcpy(dev->name, buf, IFNAMSIZ);
1133 	return ret;
1134 }
1135 EXPORT_SYMBOL(dev_alloc_name);
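
/*
 * Example (editor's sketch, not part of the original file): a driver whose
 * devices are named "foo0", "foo1", ... would typically do
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out;
 *
 * after which dev->name holds the chosen name and err is the unit number.
 * Most drivers instead place the format string in dev->name and let
 * register_netdevice() resolve it via dev_get_valid_name().
 */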
1136 
1137 static int dev_alloc_name_ns(struct net *net,
1138 			     struct net_device *dev,
1139 			     const char *name)
1140 {
1141 	char buf[IFNAMSIZ];
1142 	int ret;
1143 
1144 	ret = __dev_alloc_name(net, name, buf);
1145 	if (ret >= 0)
1146 		strlcpy(dev->name, buf, IFNAMSIZ);
1147 	return ret;
1148 }
1149 
1150 static int dev_get_valid_name(struct net *net,
1151 			      struct net_device *dev,
1152 			      const char *name)
1153 {
1154 	BUG_ON(!net);
1155 
1156 	if (!dev_valid_name(name))
1157 		return -EINVAL;
1158 
1159 	if (strchr(name, '%'))
1160 		return dev_alloc_name_ns(net, dev, name);
1161 	else if (__dev_get_by_name(net, name))
1162 		return -EEXIST;
1163 	else if (dev->name != name)
1164 		strlcpy(dev->name, name, IFNAMSIZ);
1165 
1166 	return 0;
1167 }
1168 
1169 /**
1170  *	dev_change_name - change name of a device
1171  *	@dev: device
1172  *	@newname: name (or format string) must be at least IFNAMSIZ
1173  *
1174  *	Change the name of a device.  A format string such as "eth%d"
1175  *	can be passed for wildcarding.
1176  */
1177 int dev_change_name(struct net_device *dev, const char *newname)
1178 {
1179 	unsigned char old_assign_type;
1180 	char oldname[IFNAMSIZ];
1181 	int err = 0;
1182 	int ret;
1183 	struct net *net;
1184 
1185 	ASSERT_RTNL();
1186 	BUG_ON(!dev_net(dev));
1187 
1188 	net = dev_net(dev);
1189 	if (dev->flags & IFF_UP)
1190 		return -EBUSY;
1191 
1192 	write_seqcount_begin(&devnet_rename_seq);
1193 
1194 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1195 		write_seqcount_end(&devnet_rename_seq);
1196 		return 0;
1197 	}
1198 
1199 	memcpy(oldname, dev->name, IFNAMSIZ);
1200 
1201 	err = dev_get_valid_name(net, dev, newname);
1202 	if (err < 0) {
1203 		write_seqcount_end(&devnet_rename_seq);
1204 		return err;
1205 	}
1206 
1207 	if (oldname[0] && !strchr(oldname, '%'))
1208 		netdev_info(dev, "renamed from %s\n", oldname);
1209 
1210 	old_assign_type = dev->name_assign_type;
1211 	dev->name_assign_type = NET_NAME_RENAMED;
1212 
1213 rollback:
1214 	ret = device_rename(&dev->dev, dev->name);
1215 	if (ret) {
1216 		memcpy(dev->name, oldname, IFNAMSIZ);
1217 		dev->name_assign_type = old_assign_type;
1218 		write_seqcount_end(&devnet_rename_seq);
1219 		return ret;
1220 	}
1221 
1222 	write_seqcount_end(&devnet_rename_seq);
1223 
1224 	netdev_adjacent_rename_links(dev, oldname);
1225 
1226 	write_lock_bh(&dev_base_lock);
1227 	hlist_del_rcu(&dev->name_hlist);
1228 	write_unlock_bh(&dev_base_lock);
1229 
1230 	synchronize_rcu();
1231 
1232 	write_lock_bh(&dev_base_lock);
1233 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1234 	write_unlock_bh(&dev_base_lock);
1235 
1236 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1237 	ret = notifier_to_errno(ret);
1238 
1239 	if (ret) {
1240 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1241 		if (err >= 0) {
1242 			err = ret;
1243 			write_seqcount_begin(&devnet_rename_seq);
1244 			memcpy(dev->name, oldname, IFNAMSIZ);
1245 			memcpy(oldname, newname, IFNAMSIZ);
1246 			dev->name_assign_type = old_assign_type;
1247 			old_assign_type = NET_NAME_RENAMED;
1248 			goto rollback;
1249 		} else {
1250 			pr_err("%s: name change rollback failed: %d\n",
1251 			       dev->name, ret);
1252 		}
1253 	}
1254 
1255 	return err;
1256 }
1257 
1258 /**
1259  *	dev_set_alias - change ifalias of a device
1260  *	@dev: device
1261  *	@alias: name up to IFALIASZ
1262  *	@len: limit of bytes to copy from @alias
1263  *
1264  *	Set ifalias for a device.
1265  */
1266 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1267 {
1268 	char *new_ifalias;
1269 
1270 	ASSERT_RTNL();
1271 
1272 	if (len >= IFALIASZ)
1273 		return -EINVAL;
1274 
1275 	if (!len) {
1276 		kfree(dev->ifalias);
1277 		dev->ifalias = NULL;
1278 		return 0;
1279 	}
1280 
1281 	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1282 	if (!new_ifalias)
1283 		return -ENOMEM;
1284 	dev->ifalias = new_ifalias;
1285 	memcpy(dev->ifalias, alias, len);
1286 	dev->ifalias[len] = 0;
1287 
1288 	return len;
1289 }
1290 
1291 
1292 /**
1293  *	netdev_features_change - device changes features
1294  *	@dev: device to cause notification
1295  *
1296  *	Called to indicate a device has changed features.
1297  */
1298 void netdev_features_change(struct net_device *dev)
1299 {
1300 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1301 }
1302 EXPORT_SYMBOL(netdev_features_change);
1303 
1304 /**
1305  *	netdev_state_change - device changes state
1306  *	@dev: device to cause notification
1307  *
1308  *	Called to indicate a device has changed state. This function calls
1309  *	the notifier chains for netdev_chain and sends a NEWLINK message
1310  *	to the routing socket.
1311  */
1312 void netdev_state_change(struct net_device *dev)
1313 {
1314 	if (dev->flags & IFF_UP) {
1315 		struct netdev_notifier_change_info change_info;
1316 
1317 		change_info.flags_changed = 0;
1318 		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1319 					      &change_info.info);
1320 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1321 	}
1322 }
1323 EXPORT_SYMBOL(netdev_state_change);
1324 
1325 /**
1326  * netdev_notify_peers - notify network peers about existence of @dev
1327  * @dev: network device
1328  *
1329  * Generate traffic such that interested network peers are aware of
1330  * @dev, such as by generating a gratuitous ARP. This may be used when
1331  * a device wants to inform the rest of the network about some sort of
1332  * reconfiguration such as a failover event or virtual machine
1333  * migration.
1334  */
1335 void netdev_notify_peers(struct net_device *dev)
1336 {
1337 	rtnl_lock();
1338 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1339 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1340 	rtnl_unlock();
1341 }
1342 EXPORT_SYMBOL(netdev_notify_peers);
1343 
1344 static int __dev_open(struct net_device *dev)
1345 {
1346 	const struct net_device_ops *ops = dev->netdev_ops;
1347 	int ret;
1348 
1349 	ASSERT_RTNL();
1350 
1351 	if (!netif_device_present(dev))
1352 		return -ENODEV;
1353 
1354 	/* Block netpoll from trying to do any rx path servicing.
1355 	 * If we don't do this, there is a chance that ndo_poll_controller
1356 	 * or ndo_poll may be running while we open the device.
1357 	 */
1358 	netpoll_poll_disable(dev);
1359 
1360 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1361 	ret = notifier_to_errno(ret);
1362 	if (ret)
1363 		return ret;
1364 
1365 	set_bit(__LINK_STATE_START, &dev->state);
1366 
1367 	if (ops->ndo_validate_addr)
1368 		ret = ops->ndo_validate_addr(dev);
1369 
1370 	if (!ret && ops->ndo_open)
1371 		ret = ops->ndo_open(dev);
1372 
1373 	netpoll_poll_enable(dev);
1374 
1375 	if (ret)
1376 		clear_bit(__LINK_STATE_START, &dev->state);
1377 	else {
1378 		dev->flags |= IFF_UP;
1379 		dev_set_rx_mode(dev);
1380 		dev_activate(dev);
1381 		add_device_randomness(dev->dev_addr, dev->addr_len);
1382 	}
1383 
1384 	return ret;
1385 }
1386 
1387 /**
1388  *	dev_open	- prepare an interface for use.
1389  *	@dev:	device to open
1390  *
1391  *	Takes a device from down to up state. The device's private open
1392  *	function is invoked and then the multicast lists are loaded. Finally
1393  *	the device is moved into the up state and a %NETDEV_UP message is
1394  *	sent to the netdev notifier chain.
1395  *
1396  *	Calling this function on an active interface is a nop. On a failure
1397  *	a negative errno code is returned.
1398  */
1399 int dev_open(struct net_device *dev)
1400 {
1401 	int ret;
1402 
1403 	if (dev->flags & IFF_UP)
1404 		return 0;
1405 
1406 	ret = __dev_open(dev);
1407 	if (ret < 0)
1408 		return ret;
1409 
1410 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1411 	call_netdevice_notifiers(NETDEV_UP, dev);
1412 
1413 	return ret;
1414 }
1415 EXPORT_SYMBOL(dev_open);
1416 
1417 static void __dev_close_many(struct list_head *head)
1418 {
1419 	struct net_device *dev;
1420 
1421 	ASSERT_RTNL();
1422 	might_sleep();
1423 
1424 	list_for_each_entry(dev, head, close_list) {
1425 		/* Temporarily disable netpoll until the interface is down */
1426 		netpoll_poll_disable(dev);
1427 
1428 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1429 
1430 		clear_bit(__LINK_STATE_START, &dev->state);
1431 
1432 		/* Synchronize to the scheduled poll. We cannot touch the poll
1433 		 * list; it can even be on a different cpu. So just clear netif_running().
1434 		 *
1435 		 * dev->stop() will invoke napi_disable() on all of its
1436 		 * napi_struct instances on this device.
1437 		 */
1438 		smp_mb__after_atomic(); /* Commit netif_running(). */
1439 	}
1440 
1441 	dev_deactivate_many(head);
1442 
1443 	list_for_each_entry(dev, head, close_list) {
1444 		const struct net_device_ops *ops = dev->netdev_ops;
1445 
1446 		/*
1447 		 *	Call the device specific close. This cannot fail.
1448 		 *	It is only called if the device is UP.
1449 		 *
1450 		 *	We allow it to be called even after a DETACH hot-plug
1451 		 *	event.
1452 		 */
1453 		if (ops->ndo_stop)
1454 			ops->ndo_stop(dev);
1455 
1456 		dev->flags &= ~IFF_UP;
1457 		netpoll_poll_enable(dev);
1458 	}
1459 }
1460 
1461 static void __dev_close(struct net_device *dev)
1462 {
1463 	LIST_HEAD(single);
1464 
1465 	list_add(&dev->close_list, &single);
1466 	__dev_close_many(&single);
1467 	list_del(&single);
1468 }
1469 
1470 void dev_close_many(struct list_head *head, bool unlink)
1471 {
1472 	struct net_device *dev, *tmp;
1473 
1474 	/* Remove the devices that don't need to be closed */
1475 	list_for_each_entry_safe(dev, tmp, head, close_list)
1476 		if (!(dev->flags & IFF_UP))
1477 			list_del_init(&dev->close_list);
1478 
1479 	__dev_close_many(head);
1480 
1481 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1482 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1483 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1484 		if (unlink)
1485 			list_del_init(&dev->close_list);
1486 	}
1487 }
1488 EXPORT_SYMBOL(dev_close_many);
1489 
1490 /**
1491  *	dev_close - shutdown an interface.
1492  *	@dev: device to shutdown
1493  *
1494  *	This function moves an active device into down state. A
1495  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1496  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1497  *	chain.
1498  */
1499 void dev_close(struct net_device *dev)
1500 {
1501 	if (dev->flags & IFF_UP) {
1502 		LIST_HEAD(single);
1503 
1504 		list_add(&dev->close_list, &single);
1505 		dev_close_many(&single, true);
1506 		list_del(&single);
1507 	}
1508 }
1509 EXPORT_SYMBOL(dev_close);
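
/*
 * Example (editor's sketch, not part of the original file): bringing an
 * interface up and then down from kernel code.  Both dev_open() and
 * dev_close() must be called with the RTNL lock held.
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, "eth0");
 *	if (dev && !(dev->flags & IFF_UP))
 *		err = dev_open(dev);
 *	...
 *	if (dev && (dev->flags & IFF_UP))
 *		dev_close(dev);
 *	rtnl_unlock();
 */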
1510 
1511 
1512 /**
1513  *	dev_disable_lro - disable Large Receive Offload on a device
1514  *	@dev: device
1515  *
1516  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1517  *	called under RTNL.  This is needed if received packets may be
1518  *	forwarded to another interface.
1519  */
1520 void dev_disable_lro(struct net_device *dev)
1521 {
1522 	struct net_device *lower_dev;
1523 	struct list_head *iter;
1524 
1525 	dev->wanted_features &= ~NETIF_F_LRO;
1526 	netdev_update_features(dev);
1527 
1528 	if (unlikely(dev->features & NETIF_F_LRO))
1529 		netdev_WARN(dev, "failed to disable LRO!\n");
1530 
1531 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1532 		dev_disable_lro(lower_dev);
1533 }
1534 EXPORT_SYMBOL(dev_disable_lro);
1535 
1536 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1537 				   struct net_device *dev)
1538 {
1539 	struct netdev_notifier_info info;
1540 
1541 	netdev_notifier_info_init(&info, dev);
1542 	return nb->notifier_call(nb, val, &info);
1543 }
1544 
1545 static int dev_boot_phase = 1;
1546 
1547 /**
1548  * register_netdevice_notifier - register a network notifier block
1549  * @nb: notifier
1550  *
1551  * Register a notifier to be called when network device events occur.
1552  * The notifier passed is linked into the kernel structures and must
1553  * not be reused until it has been unregistered. A negative errno code
1554  * is returned on a failure.
1555  *
1556  * When registered, all registration and up events are replayed
1557  * to the new notifier to allow the device to have a race-free
1558  * view of the network device list.
1559  */
1560 
1561 int register_netdevice_notifier(struct notifier_block *nb)
1562 {
1563 	struct net_device *dev;
1564 	struct net_device *last;
1565 	struct net *net;
1566 	int err;
1567 
1568 	rtnl_lock();
1569 	err = raw_notifier_chain_register(&netdev_chain, nb);
1570 	if (err)
1571 		goto unlock;
1572 	if (dev_boot_phase)
1573 		goto unlock;
1574 	for_each_net(net) {
1575 		for_each_netdev(net, dev) {
1576 			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1577 			err = notifier_to_errno(err);
1578 			if (err)
1579 				goto rollback;
1580 
1581 			if (!(dev->flags & IFF_UP))
1582 				continue;
1583 
1584 			call_netdevice_notifier(nb, NETDEV_UP, dev);
1585 		}
1586 	}
1587 
1588 unlock:
1589 	rtnl_unlock();
1590 	return err;
1591 
1592 rollback:
1593 	last = dev;
1594 	for_each_net(net) {
1595 		for_each_netdev(net, dev) {
1596 			if (dev == last)
1597 				goto outroll;
1598 
1599 			if (dev->flags & IFF_UP) {
1600 				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1601 							dev);
1602 				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1603 			}
1604 			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1605 		}
1606 	}
1607 
1608 outroll:
1609 	raw_notifier_chain_unregister(&netdev_chain, nb);
1610 	goto unlock;
1611 }
1612 EXPORT_SYMBOL(register_netdevice_notifier);
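
/*
 * Example (editor's sketch, not part of the original file): a minimal
 * notifier that logs device up/down events.  "my_notifier_call" and "my_nb"
 * are hypothetical names.
 *
 *	static int my_notifier_call(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			netdev_info(dev, "interface is up\n");
 *			break;
 *		case NETDEV_DOWN:
 *			netdev_info(dev, "interface is down\n");
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */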
1613 
1614 /**
1615  * unregister_netdevice_notifier - unregister a network notifier block
1616  * @nb: notifier
1617  *
1618  * Unregister a notifier previously registered by
1619  * register_netdevice_notifier(). The notifier is unlinked from the
1620  * kernel structures and may then be reused. A negative errno code
1621  * is returned on a failure.
1622  *
1623  * After unregistering, unregister and down device events are synthesized
1624  * for all devices on the device list and sent to the removed notifier to
1625  * remove the need for special case cleanup code.
1626  */
1627 
1628 int unregister_netdevice_notifier(struct notifier_block *nb)
1629 {
1630 	struct net_device *dev;
1631 	struct net *net;
1632 	int err;
1633 
1634 	rtnl_lock();
1635 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1636 	if (err)
1637 		goto unlock;
1638 
1639 	for_each_net(net) {
1640 		for_each_netdev(net, dev) {
1641 			if (dev->flags & IFF_UP) {
1642 				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1643 							dev);
1644 				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1645 			}
1646 			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1647 		}
1648 	}
1649 unlock:
1650 	rtnl_unlock();
1651 	return err;
1652 }
1653 EXPORT_SYMBOL(unregister_netdevice_notifier);
1654 
1655 /**
1656  *	call_netdevice_notifiers_info - call all network notifier blocks
1657  *	@val: value passed unmodified to notifier function
1658  *	@dev: net_device pointer passed unmodified to notifier function
1659  *	@info: notifier information data
1660  *
1661  *	Call all network notifier blocks.  Parameters and return value
1662  *	are as for raw_notifier_call_chain().
1663  */
1664 
1665 static int call_netdevice_notifiers_info(unsigned long val,
1666 					 struct net_device *dev,
1667 					 struct netdev_notifier_info *info)
1668 {
1669 	ASSERT_RTNL();
1670 	netdev_notifier_info_init(info, dev);
1671 	return raw_notifier_call_chain(&netdev_chain, val, info);
1672 }
1673 
1674 /**
1675  *	call_netdevice_notifiers - call all network notifier blocks
1676  *      @val: value passed unmodified to notifier function
1677  *      @dev: net_device pointer passed unmodified to notifier function
1678  *
1679  *	Call all network notifier blocks.  Parameters and return value
1680  *	are as for raw_notifier_call_chain().
1681  */
1682 
1683 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1684 {
1685 	struct netdev_notifier_info info;
1686 
1687 	return call_netdevice_notifiers_info(val, dev, &info);
1688 }
1689 EXPORT_SYMBOL(call_netdevice_notifiers);
1690 
1691 #ifdef CONFIG_NET_INGRESS
1692 static struct static_key ingress_needed __read_mostly;
1693 
1694 void net_inc_ingress_queue(void)
1695 {
1696 	static_key_slow_inc(&ingress_needed);
1697 }
1698 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1699 
1700 void net_dec_ingress_queue(void)
1701 {
1702 	static_key_slow_dec(&ingress_needed);
1703 }
1704 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1705 #endif
1706 
1707 #ifdef CONFIG_NET_EGRESS
1708 static struct static_key egress_needed __read_mostly;
1709 
1710 void net_inc_egress_queue(void)
1711 {
1712 	static_key_slow_inc(&egress_needed);
1713 }
1714 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1715 
1716 void net_dec_egress_queue(void)
1717 {
1718 	static_key_slow_dec(&egress_needed);
1719 }
1720 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1721 #endif
1722 
1723 static struct static_key netstamp_needed __read_mostly;
1724 #ifdef HAVE_JUMP_LABEL
1725 static atomic_t netstamp_needed_deferred;
1726 static atomic_t netstamp_wanted;
1727 static void netstamp_clear(struct work_struct *work)
1728 {
1729 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1730 	int wanted;
1731 
1732 	wanted = atomic_add_return(deferred, &netstamp_wanted);
1733 	if (wanted > 0)
1734 		static_key_enable(&netstamp_needed);
1735 	else
1736 		static_key_disable(&netstamp_needed);
1737 }
1738 static DECLARE_WORK(netstamp_work, netstamp_clear);
1739 #endif
1740 
1741 void net_enable_timestamp(void)
1742 {
1743 #ifdef HAVE_JUMP_LABEL
1744 	int wanted;
1745 
1746 	while (1) {
1747 		wanted = atomic_read(&netstamp_wanted);
1748 		if (wanted <= 0)
1749 			break;
1750 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1751 			return;
1752 	}
1753 	atomic_inc(&netstamp_needed_deferred);
1754 	schedule_work(&netstamp_work);
1755 #else
1756 	static_key_slow_inc(&netstamp_needed);
1757 #endif
1758 }
1759 EXPORT_SYMBOL(net_enable_timestamp);
1760 
1761 void net_disable_timestamp(void)
1762 {
1763 #ifdef HAVE_JUMP_LABEL
1764 	int wanted;
1765 
1766 	while (1) {
1767 		wanted = atomic_read(&netstamp_wanted);
1768 		if (wanted <= 1)
1769 			break;
1770 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1771 			return;
1772 	}
1773 	atomic_dec(&netstamp_needed_deferred);
1774 	schedule_work(&netstamp_work);
1775 #else
1776 	static_key_slow_dec(&netstamp_needed);
1777 #endif
1778 }
1779 EXPORT_SYMBOL(net_disable_timestamp);
1780 
1781 static inline void net_timestamp_set(struct sk_buff *skb)
1782 {
1783 	skb->tstamp = 0;
1784 	if (static_key_false(&netstamp_needed))
1785 		__net_timestamp(skb);
1786 }
1787 
1788 #define net_timestamp_check(COND, SKB)			\
1789 	if (static_key_false(&netstamp_needed)) {		\
1790 		if ((COND) && !(SKB)->tstamp)	\
1791 			__net_timestamp(SKB);		\
1792 	}						\
1793 
1794 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
1795 {
1796 	unsigned int len;
1797 
1798 	if (!(dev->flags & IFF_UP))
1799 		return false;
1800 
1801 	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1802 	if (skb->len <= len)
1803 		return true;
1804 
1805 	/* if TSO is enabled, we don't care about the length as the packet
1806 	 * could be forwarded without being segmented before
1807 	 */
1808 	if (skb_is_gso(skb))
1809 		return true;
1810 
1811 	return false;
1812 }
1813 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1814 
1815 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1816 {
1817 	int ret = ____dev_forward_skb(dev, skb);
1818 
1819 	if (likely(!ret)) {
1820 		skb->protocol = eth_type_trans(skb, dev);
1821 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1822 	}
1823 
1824 	return ret;
1825 }
1826 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1827 
1828 /**
1829  * dev_forward_skb - loopback an skb to another netif
1830  *
1831  * @dev: destination network device
1832  * @skb: buffer to forward
1833  *
1834  * return values:
1835  *	NET_RX_SUCCESS	(no congestion)
1836  *	NET_RX_DROP     (packet was dropped, but freed)
1837  *
1838  * dev_forward_skb can be used for injecting an skb from the
1839  * start_xmit function of one device into the receive queue
1840  * of another device.
1841  *
1842  * The receiving device may be in another namespace, so
1843  * we have to clear all information in the skb that could
1844  * impact namespace isolation.
1845  */
1846 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1847 {
1848 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1849 }
1850 EXPORT_SYMBOL_GPL(dev_forward_skb);
1851 
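/*
 * Hand the skb to one packet tap handler. The extra reference taken
 * here is expected to be consumed by pt_prev->func(), so the caller
 * keeps its own reference to the skb.
 */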
1852 static inline int deliver_skb(struct sk_buff *skb,
1853 			      struct packet_type *pt_prev,
1854 			      struct net_device *orig_dev)
1855 {
1856 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1857 		return -ENOMEM;
1858 	refcount_inc(&skb->users);
1859 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1860 }
1861 
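/*
 * Walk @ptype_list and deliver the skb to every handler matching
 * @type. Delivery is deferred by one iteration (via *pt) so that the
 * caller can hand the skb to the final handler without taking an
 * extra reference.
 */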
1862 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1863 					  struct packet_type **pt,
1864 					  struct net_device *orig_dev,
1865 					  __be16 type,
1866 					  struct list_head *ptype_list)
1867 {
1868 	struct packet_type *ptype, *pt_prev = *pt;
1869 
1870 	list_for_each_entry_rcu(ptype, ptype_list, list) {
1871 		if (ptype->type != type)
1872 			continue;
1873 		if (pt_prev)
1874 			deliver_skb(skb, pt_prev, orig_dev);
1875 		pt_prev = ptype;
1876 	}
1877 	*pt = pt_prev;
1878 }
1879 
1880 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1881 {
1882 	if (!ptype->af_packet_priv || !skb->sk)
1883 		return false;
1884 
1885 	if (ptype->id_match)
1886 		return ptype->id_match(ptype, skb->sk);
1887 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1888 		return true;
1889 
1890 	return false;
1891 }
1892 
1893 /*
1894  *	Support routine. Sends outgoing frames to any network
1895  *	taps currently in use.
1896  */
1897 
1898 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1899 {
1900 	struct packet_type *ptype;
1901 	struct sk_buff *skb2 = NULL;
1902 	struct packet_type *pt_prev = NULL;
1903 	struct list_head *ptype_list = &ptype_all;
1904 
1905 	rcu_read_lock();
1906 again:
1907 	list_for_each_entry_rcu(ptype, ptype_list, list) {
1908 		/* Never send packets back to the socket
1909 		 * they originated from - MvS (miquels@drinkel.ow.org)
1910 		 */
1911 		if (skb_loop_sk(ptype, skb))
1912 			continue;
1913 
1914 		if (pt_prev) {
1915 			deliver_skb(skb2, pt_prev, skb->dev);
1916 			pt_prev = ptype;
1917 			continue;
1918 		}
1919 
1920 		/* need to clone skb, done only once */
1921 		skb2 = skb_clone(skb, GFP_ATOMIC);
1922 		if (!skb2)
1923 			goto out_unlock;
1924 
1925 		net_timestamp_set(skb2);
1926 
1927 		/* skb->nh should be correctly
1928 		 * set by sender, so that the second statement is
1929 		 * just protection against buggy protocols.
1930 		 */
1931 		skb_reset_mac_header(skb2);
1932 
1933 		if (skb_network_header(skb2) < skb2->data ||
1934 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1935 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1936 					     ntohs(skb2->protocol),
1937 					     dev->name);
1938 			skb_reset_network_header(skb2);
1939 		}
1940 
1941 		skb2->transport_header = skb2->network_header;
1942 		skb2->pkt_type = PACKET_OUTGOING;
1943 		pt_prev = ptype;
1944 	}
1945 
1946 	if (ptype_list == &ptype_all) {
1947 		ptype_list = &dev->ptype_all;
1948 		goto again;
1949 	}
1950 out_unlock:
1951 	if (pt_prev)
1952 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1953 	rcu_read_unlock();
1954 }
1955 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
1956 
1957 /**
1958  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1959  * @dev: Network device
1960  * @txq: number of queues available
1961  *
1962  * If real_num_tx_queues is changed the tc mappings may no longer be
1963  * valid. To resolve this verify the tc mapping remains valid and, if
1964  * not, zero the mapping. With no priorities mapping to this
1965  * offset/count pair it will no longer be used. In the worst case, if
1966  * TC0 is invalid nothing can be done, so disable priority mappings. It
1967  * is expected that drivers will fix this mapping if they can before
1968  * calling netif_set_real_num_tx_queues.
1969  */
1970 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1971 {
1972 	int i;
1973 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1974 
1975 	/* If TC0 is invalidated disable TC mapping */
1976 	if (tc->offset + tc->count > txq) {
1977 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1978 		dev->num_tc = 0;
1979 		return;
1980 	}
1981 
1982 	/* Invalidated prio to tc mappings set to TC0 */
1983 	for (i = 1; i < TC_BITMASK + 1; i++) {
1984 		int q = netdev_get_prio_tc_map(dev, i);
1985 
1986 		tc = &dev->tc_to_txq[q];
1987 		if (tc->offset + tc->count > txq) {
1988 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1989 				i, q);
1990 			netdev_set_prio_tc_map(dev, i, 0);
1991 		}
1992 	}
1993 }
1994 
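/*
 * Map a tx queue index back to its traffic class by scanning the
 * per-tc offset/count ranges. Returns -1 if no class covers @txq,
 * or 0 when the device has no traffic classes configured.
 */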
1995 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
1996 {
1997 	if (dev->num_tc) {
1998 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1999 		int i;
2000 
2001 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2002 			if ((txq - tc->offset) < tc->count)
2003 				return i;
2004 		}
2005 
2006 		return -1;
2007 	}
2008 
2009 	return 0;
2010 }
2011 
2012 #ifdef CONFIG_XPS
2013 static DEFINE_MUTEX(xps_map_mutex);
2014 #define xmap_dereference(P)		\
2015 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2016 
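/*
 * Drop queue @index from the XPS map at @tci. Returns true while the
 * map still holds entries, false if there was no map or the map has
 * just been freed because it became empty.
 */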
2017 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2018 			     int tci, u16 index)
2019 {
2020 	struct xps_map *map = NULL;
2021 	int pos;
2022 
2023 	if (dev_maps)
2024 		map = xmap_dereference(dev_maps->cpu_map[tci]);
2025 	if (!map)
2026 		return false;
2027 
2028 	for (pos = map->len; pos--;) {
2029 		if (map->queues[pos] != index)
2030 			continue;
2031 
2032 		if (map->len > 1) {
2033 			map->queues[pos] = map->queues[--map->len];
2034 			break;
2035 		}
2036 
2037 		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
2038 		kfree_rcu(map, rcu);
2039 		return false;
2040 	}
2041 
2042 	return true;
2043 }
2044 
2045 static bool remove_xps_queue_cpu(struct net_device *dev,
2046 				 struct xps_dev_maps *dev_maps,
2047 				 int cpu, u16 offset, u16 count)
2048 {
2049 	int num_tc = dev->num_tc ? : 1;
2050 	bool active = false;
2051 	int tci;
2052 
2053 	for (tci = cpu * num_tc; num_tc--; tci++) {
2054 		int i, j;
2055 
2056 		for (i = count, j = offset; i--; j++) {
2057 			if (!remove_xps_queue(dev_maps, cpu, j))
2058 				break;
2059 		}
2060 
2061 		active |= i < 0;
2062 	}
2063 
2064 	return active;
2065 }
2066 
2067 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2068 				   u16 count)
2069 {
2070 	struct xps_dev_maps *dev_maps;
2071 	int cpu, i;
2072 	bool active = false;
2073 
2074 	mutex_lock(&xps_map_mutex);
2075 	dev_maps = xmap_dereference(dev->xps_maps);
2076 
2077 	if (!dev_maps)
2078 		goto out_no_maps;
2079 
2080 	for_each_possible_cpu(cpu)
2081 		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2082 					       offset, count);
2083 
2084 	if (!active) {
2085 		RCU_INIT_POINTER(dev->xps_maps, NULL);
2086 		kfree_rcu(dev_maps, rcu);
2087 	}
2088 
2089 	for (i = offset + (count - 1); count--; i--)
2090 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2091 					     NUMA_NO_NODE);
2092 
2093 out_no_maps:
2094 	mutex_unlock(&xps_map_mutex);
2095 }
2096 
2097 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2098 {
2099 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2100 }
2101 
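/*
 * Return @map unchanged if @index is already present or there is
 * spare room; otherwise allocate a larger map on @cpu's NUMA node,
 * copy the old entries across and return the new map. The caller is
 * responsible for publishing the new map and freeing the old one.
 */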
2102 static struct xps_map *expand_xps_map(struct xps_map *map,
2103 				      int cpu, u16 index)
2104 {
2105 	struct xps_map *new_map;
2106 	int alloc_len = XPS_MIN_MAP_ALLOC;
2107 	int i, pos;
2108 
2109 	for (pos = 0; map && pos < map->len; pos++) {
2110 		if (map->queues[pos] != index)
2111 			continue;
2112 		return map;
2113 	}
2114 
2115 	/* Need to add queue to this CPU's existing map */
2116 	if (map) {
2117 		if (pos < map->alloc_len)
2118 			return map;
2119 
2120 		alloc_len = map->alloc_len * 2;
2121 	}
2122 
2123 	/* Need to allocate new map to store queue on this CPU's map */
2124 	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2125 			       cpu_to_node(cpu));
2126 	if (!new_map)
2127 		return NULL;
2128 
2129 	for (i = 0; i < pos; i++)
2130 		new_map->queues[i] = map->queues[i];
2131 	new_map->alloc_len = alloc_len;
2132 	new_map->len = pos;
2133 
2134 	return new_map;
2135 }
2136 
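/**
 * netif_set_xps_queue - set the XPS CPU affinity of a tx queue
 * @dev: network device
 * @mask: CPUs that may transmit on this queue
 * @index: tx queue index
 *
 * Rebuild the per-CPU XPS maps so that the CPUs in @mask steer their
 * transmissions to queue @index (within the queue's traffic class),
 * and prune the queue from CPUs that are no longer in @mask. Old maps
 * are freed via RCU. Returns 0 on success or a negative errno.
 */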
2137 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2138 			u16 index)
2139 {
2140 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2141 	int i, cpu, tci, numa_node_id = -2;
2142 	int maps_sz, num_tc = 1, tc = 0;
2143 	struct xps_map *map, *new_map;
2144 	bool active = false;
2145 
2146 	if (dev->num_tc) {
2147 		num_tc = dev->num_tc;
2148 		tc = netdev_txq_to_tc(dev, index);
2149 		if (tc < 0)
2150 			return -EINVAL;
2151 	}
2152 
2153 	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2154 	if (maps_sz < L1_CACHE_BYTES)
2155 		maps_sz = L1_CACHE_BYTES;
2156 
2157 	mutex_lock(&xps_map_mutex);
2158 
2159 	dev_maps = xmap_dereference(dev->xps_maps);
2160 
2161 	/* allocate memory for queue storage */
2162 	for_each_cpu_and(cpu, cpu_online_mask, mask) {
2163 		if (!new_dev_maps)
2164 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2165 		if (!new_dev_maps) {
2166 			mutex_unlock(&xps_map_mutex);
2167 			return -ENOMEM;
2168 		}
2169 
2170 		tci = cpu * num_tc + tc;
2171 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
2172 				 NULL;
2173 
2174 		map = expand_xps_map(map, cpu, index);
2175 		if (!map)
2176 			goto error;
2177 
2178 		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2179 	}
2180 
2181 	if (!new_dev_maps)
2182 		goto out_no_new_maps;
2183 
2184 	for_each_possible_cpu(cpu) {
2185 		/* copy maps belonging to foreign traffic classes */
2186 		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2187 			/* fill in the new device map from the old device map */
2188 			map = xmap_dereference(dev_maps->cpu_map[tci]);
2189 			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2190 		}
2191 
2192 		/* We need to explicitly update tci as the previous loop
2193 		 * could break out early if dev_maps is NULL.
2194 		 */
2195 		tci = cpu * num_tc + tc;
2196 
2197 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2198 			/* add queue to CPU maps */
2199 			int pos = 0;
2200 
2201 			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2202 			while ((pos < map->len) && (map->queues[pos] != index))
2203 				pos++;
2204 
2205 			if (pos == map->len)
2206 				map->queues[map->len++] = index;
2207 #ifdef CONFIG_NUMA
2208 			if (numa_node_id == -2)
2209 				numa_node_id = cpu_to_node(cpu);
2210 			else if (numa_node_id != cpu_to_node(cpu))
2211 				numa_node_id = -1;
2212 #endif
2213 		} else if (dev_maps) {
2214 			/* fill in the new device map from the old device map */
2215 			map = xmap_dereference(dev_maps->cpu_map[tci]);
2216 			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2217 		}
2218 
2219 		/* copy maps belonging to foreign traffic classes */
2220 		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2221 			/* fill in the new device map from the old device map */
2222 			map = xmap_dereference(dev_maps->cpu_map[tci]);
2223 			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2224 		}
2225 	}
2226 
2227 	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2228 
2229 	/* Cleanup old maps */
2230 	if (!dev_maps)
2231 		goto out_no_old_maps;
2232 
2233 	for_each_possible_cpu(cpu) {
2234 		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2235 			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2236 			map = xmap_dereference(dev_maps->cpu_map[tci]);
2237 			if (map && map != new_map)
2238 				kfree_rcu(map, rcu);
2239 		}
2240 	}
2241 
2242 	kfree_rcu(dev_maps, rcu);
2243 
2244 out_no_old_maps:
2245 	dev_maps = new_dev_maps;
2246 	active = true;
2247 
2248 out_no_new_maps:
2249 	/* update Tx queue numa node */
2250 	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2251 				     (numa_node_id >= 0) ? numa_node_id :
2252 				     NUMA_NO_NODE);
2253 
2254 	if (!dev_maps)
2255 		goto out_no_maps;
2256 
2257 	/* removes queue from unused CPUs */
2258 	for_each_possible_cpu(cpu) {
2259 		for (i = tc, tci = cpu * num_tc; i--; tci++)
2260 			active |= remove_xps_queue(dev_maps, tci, index);
2261 		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2262 			active |= remove_xps_queue(dev_maps, tci, index);
2263 		for (i = num_tc - tc, tci++; --i; tci++)
2264 			active |= remove_xps_queue(dev_maps, tci, index);
2265 	}
2266 
2267 	/* free map if not active */
2268 	if (!active) {
2269 		RCU_INIT_POINTER(dev->xps_maps, NULL);
2270 		kfree_rcu(dev_maps, rcu);
2271 	}
2272 
2273 out_no_maps:
2274 	mutex_unlock(&xps_map_mutex);
2275 
2276 	return 0;
2277 error:
2278 	/* remove any maps that we added */
2279 	for_each_possible_cpu(cpu) {
2280 		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2281 			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2282 			map = dev_maps ?
2283 			      xmap_dereference(dev_maps->cpu_map[tci]) :
2284 			      NULL;
2285 			if (new_map && new_map != map)
2286 				kfree(new_map);
2287 		}
2288 	}
2289 
2290 	mutex_unlock(&xps_map_mutex);
2291 
2292 	kfree(new_dev_maps);
2293 	return -ENOMEM;
2294 }
2295 EXPORT_SYMBOL(netif_set_xps_queue);
2296 
2297 #endif
2298 void netdev_reset_tc(struct net_device *dev)
2299 {
2300 #ifdef CONFIG_XPS
2301 	netif_reset_xps_queues_gt(dev, 0);
2302 #endif
2303 	dev->num_tc = 0;
2304 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2305 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2306 }
2307 EXPORT_SYMBOL(netdev_reset_tc);
2308 
2309 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2310 {
2311 	if (tc >= dev->num_tc)
2312 		return -EINVAL;
2313 
2314 #ifdef CONFIG_XPS
2315 	netif_reset_xps_queues(dev, offset, count);
2316 #endif
2317 	dev->tc_to_txq[tc].count = count;
2318 	dev->tc_to_txq[tc].offset = offset;
2319 	return 0;
2320 }
2321 EXPORT_SYMBOL(netdev_set_tc_queue);
2322 
2323 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2324 {
2325 	if (num_tc > TC_MAX_QUEUE)
2326 		return -EINVAL;
2327 
2328 #ifdef CONFIG_XPS
2329 	netif_reset_xps_queues_gt(dev, 0);
2330 #endif
2331 	dev->num_tc = num_tc;
2332 	return 0;
2333 }
2334 EXPORT_SYMBOL(netdev_set_num_tc);
2335 
2336 /*
2337  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2338  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2339  */
2340 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2341 {
2342 	int rc;
2343 
2344 	if (txq < 1 || txq > dev->num_tx_queues)
2345 		return -EINVAL;
2346 
2347 	if (dev->reg_state == NETREG_REGISTERED ||
2348 	    dev->reg_state == NETREG_UNREGISTERING) {
2349 		ASSERT_RTNL();
2350 
2351 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2352 						  txq);
2353 		if (rc)
2354 			return rc;
2355 
2356 		if (dev->num_tc)
2357 			netif_setup_tc(dev, txq);
2358 
2359 		if (txq < dev->real_num_tx_queues) {
2360 			qdisc_reset_all_tx_gt(dev, txq);
2361 #ifdef CONFIG_XPS
2362 			netif_reset_xps_queues_gt(dev, txq);
2363 #endif
2364 		}
2365 	}
2366 
2367 	dev->real_num_tx_queues = txq;
2368 	return 0;
2369 }
2370 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2371 
2372 #ifdef CONFIG_SYSFS
2373 /**
2374  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2375  *	@dev: Network device
2376  *	@rxq: Actual number of RX queues
2377  *
2378  *	This must be called either with the rtnl_lock held or before
2379  *	registration of the net device.  Returns 0 on success, or a
2380  *	negative error code.  If called before registration, it always
2381  *	succeeds.
2382  */
2383 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2384 {
2385 	int rc;
2386 
2387 	if (rxq < 1 || rxq > dev->num_rx_queues)
2388 		return -EINVAL;
2389 
2390 	if (dev->reg_state == NETREG_REGISTERED) {
2391 		ASSERT_RTNL();
2392 
2393 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2394 						  rxq);
2395 		if (rc)
2396 			return rc;
2397 	}
2398 
2399 	dev->real_num_rx_queues = rxq;
2400 	return 0;
2401 }
2402 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2403 #endif
2404 
2405 /**
2406  * netif_get_num_default_rss_queues - default number of RSS queues
2407  *
2408  * This routine should set an upper limit on the number of RSS queues
2409  * used by default by multiqueue devices.
2410  */
2411 int netif_get_num_default_rss_queues(void)
2412 {
2413 	return is_kdump_kernel() ?
2414 		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2415 }
2416 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2417 
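/*
 * Link the qdisc onto this CPU's output_queue list and raise
 * NET_TX_SOFTIRQ so the transmit softirq will run it later.
 * Interrupts are disabled around the list manipulation.
 */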
2418 static void __netif_reschedule(struct Qdisc *q)
2419 {
2420 	struct softnet_data *sd;
2421 	unsigned long flags;
2422 
2423 	local_irq_save(flags);
2424 	sd = this_cpu_ptr(&softnet_data);
2425 	q->next_sched = NULL;
2426 	*sd->output_queue_tailp = q;
2427 	sd->output_queue_tailp = &q->next_sched;
2428 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2429 	local_irq_restore(flags);
2430 }
2431 
2432 void __netif_schedule(struct Qdisc *q)
2433 {
2434 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2435 		__netif_reschedule(q);
2436 }
2437 EXPORT_SYMBOL(__netif_schedule);
2438 
2439 struct dev_kfree_skb_cb {
2440 	enum skb_free_reason reason;
2441 };
2442 
2443 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2444 {
2445 	return (struct dev_kfree_skb_cb *)skb->cb;
2446 }
2447 
2448 void netif_schedule_queue(struct netdev_queue *txq)
2449 {
2450 	rcu_read_lock();
2451 	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2452 		struct Qdisc *q = rcu_dereference(txq->qdisc);
2453 
2454 		__netif_schedule(q);
2455 	}
2456 	rcu_read_unlock();
2457 }
2458 EXPORT_SYMBOL(netif_schedule_queue);
2459 
2460 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2461 {
2462 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2463 		struct Qdisc *q;
2464 
2465 		rcu_read_lock();
2466 		q = rcu_dereference(dev_queue->qdisc);
2467 		__netif_schedule(q);
2468 		rcu_read_unlock();
2469 	}
2470 }
2471 EXPORT_SYMBOL(netif_tx_wake_queue);
2472 
2473 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2474 {
2475 	unsigned long flags;
2476 
2477 	if (unlikely(!skb))
2478 		return;
2479 
2480 	if (likely(refcount_read(&skb->users) == 1)) {
2481 		smp_rmb();
2482 		refcount_set(&skb->users, 0);
2483 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
2484 		return;
2485 	}
2486 	get_kfree_skb_cb(skb)->reason = reason;
2487 	local_irq_save(flags);
2488 	skb->next = __this_cpu_read(softnet_data.completion_queue);
2489 	__this_cpu_write(softnet_data.completion_queue, skb);
2490 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
2491 	local_irq_restore(flags);
2492 }
2493 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2494 
2495 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2496 {
2497 	if (in_irq() || irqs_disabled())
2498 		__dev_kfree_skb_irq(skb, reason);
2499 	else
2500 		dev_kfree_skb(skb);
2501 }
2502 EXPORT_SYMBOL(__dev_kfree_skb_any);
2503 
2504 
2505 /**
2506  * netif_device_detach - mark device as removed
2507  * @dev: network device
2508  *
2509  * Mark device as removed from system and therefore no longer available.
2510  */
2511 void netif_device_detach(struct net_device *dev)
2512 {
2513 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2514 	    netif_running(dev)) {
2515 		netif_tx_stop_all_queues(dev);
2516 	}
2517 }
2518 EXPORT_SYMBOL(netif_device_detach);
2519 
2520 /**
2521  * netif_device_attach - mark device as attached
2522  * @dev: network device
2523  *
2524  * Mark device as attached to the system and restart if needed.
2525  */
2526 void netif_device_attach(struct net_device *dev)
2527 {
2528 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2529 	    netif_running(dev)) {
2530 		netif_tx_wake_all_queues(dev);
2531 		__netdev_watchdog_up(dev);
2532 	}
2533 }
2534 EXPORT_SYMBOL(netif_device_attach);
2535 
2536 /*
2537  * Returns a Tx hash based on the given packet descriptor and a Tx queues' number
2538  * to be used as a distribution range.
2539  */
2540 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2541 		  unsigned int num_tx_queues)
2542 {
2543 	u32 hash;
2544 	u16 qoffset = 0;
2545 	u16 qcount = num_tx_queues;
2546 
2547 	if (skb_rx_queue_recorded(skb)) {
2548 		hash = skb_get_rx_queue(skb);
2549 		while (unlikely(hash >= num_tx_queues))
2550 			hash -= num_tx_queues;
2551 		return hash;
2552 	}
2553 
2554 	if (dev->num_tc) {
2555 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2556 
2557 		qoffset = dev->tc_to_txq[tc].offset;
2558 		qcount = dev->tc_to_txq[tc].count;
2559 	}
2560 
2561 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2562 }
2563 EXPORT_SYMBOL(__skb_tx_hash);
2564 
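/*
 * Rate-limited warning used when a driver hands us an skb whose
 * offload setup (GSO/checksum) the stack cannot handle; it dumps the
 * device/driver name, the advertised features and the skb geometry
 * to aid debugging.
 */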
2565 static void skb_warn_bad_offload(const struct sk_buff *skb)
2566 {
2567 	static const netdev_features_t null_features;
2568 	struct net_device *dev = skb->dev;
2569 	const char *name = "";
2570 
2571 	if (!net_ratelimit())
2572 		return;
2573 
2574 	if (dev) {
2575 		if (dev->dev.parent)
2576 			name = dev_driver_string(dev->dev.parent);
2577 		else
2578 			name = netdev_name(dev);
2579 	}
2580 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2581 	     "gso_type=%d ip_summed=%d\n",
2582 	     name, dev ? &dev->features : &null_features,
2583 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
2584 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2585 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
2586 }
2587 
2588 /*
2589  * Invalidate hardware checksum when packet is to be mangled, and
2590  * complete checksum manually on outgoing path.
2591  */
2592 int skb_checksum_help(struct sk_buff *skb)
2593 {
2594 	__wsum csum;
2595 	int ret = 0, offset;
2596 
2597 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2598 		goto out_set_summed;
2599 
2600 	if (unlikely(skb_shinfo(skb)->gso_size)) {
2601 		skb_warn_bad_offload(skb);
2602 		return -EINVAL;
2603 	}
2604 
2605 	/* Before computing a checksum, we should make sure no frag could
2606 	 * be modified by an external entity: the checksum could be wrong.
2607 	 */
2608 	if (skb_has_shared_frag(skb)) {
2609 		ret = __skb_linearize(skb);
2610 		if (ret)
2611 			goto out;
2612 	}
2613 
2614 	offset = skb_checksum_start_offset(skb);
2615 	BUG_ON(offset >= skb_headlen(skb));
2616 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
2617 
2618 	offset += skb->csum_offset;
2619 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2620 
2621 	if (skb_cloned(skb) &&
2622 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2623 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2624 		if (ret)
2625 			goto out;
2626 	}
2627 
2628 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2629 out_set_summed:
2630 	skb->ip_summed = CHECKSUM_NONE;
2631 out:
2632 	return ret;
2633 }
2634 EXPORT_SYMBOL(skb_checksum_help);
2635 
2636 int skb_crc32c_csum_help(struct sk_buff *skb)
2637 {
2638 	__le32 crc32c_csum;
2639 	int ret = 0, offset, start;
2640 
2641 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2642 		goto out;
2643 
2644 	if (unlikely(skb_is_gso(skb)))
2645 		goto out;
2646 
2647 	/* Before computing a checksum, we should make sure no frag could
2648 	 * be modified by an external entity: the checksum could be wrong.
2649 	 */
2650 	if (unlikely(skb_has_shared_frag(skb))) {
2651 		ret = __skb_linearize(skb);
2652 		if (ret)
2653 			goto out;
2654 	}
2655 	start = skb_checksum_start_offset(skb);
2656 	offset = start + offsetof(struct sctphdr, checksum);
2657 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2658 		ret = -EINVAL;
2659 		goto out;
2660 	}
2661 	if (skb_cloned(skb) &&
2662 	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
2663 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2664 		if (ret)
2665 			goto out;
2666 	}
2667 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2668 						  skb->len - start, ~(__u32)0,
2669 						  crc32c_csum_stub));
2670 	*(__le32 *)(skb->data + offset) = crc32c_csum;
2671 	skb->ip_summed = CHECKSUM_NONE;
2672 	skb->csum_not_inet = 0;
2673 out:
2674 	return ret;
2675 }
2676 
2677 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2678 {
2679 	__be16 type = skb->protocol;
2680 
2681 	/* Tunnel gso handlers can set protocol to ethernet. */
2682 	if (type == htons(ETH_P_TEB)) {
2683 		struct ethhdr *eth;
2684 
2685 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2686 			return 0;
2687 
2688 		eth = (struct ethhdr *)skb_mac_header(skb);
2689 		type = eth->h_proto;
2690 	}
2691 
2692 	return __vlan_get_protocol(skb, type, depth);
2693 }
2694 
2695 /**
2696  *	skb_mac_gso_segment - mac layer segmentation handler.
2697  *	@skb: buffer to segment
2698  *	@features: features for the output path (see dev->features)
2699  */
2700 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2701 				    netdev_features_t features)
2702 {
2703 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2704 	struct packet_offload *ptype;
2705 	int vlan_depth = skb->mac_len;
2706 	__be16 type = skb_network_protocol(skb, &vlan_depth);
2707 
2708 	if (unlikely(!type))
2709 		return ERR_PTR(-EINVAL);
2710 
2711 	__skb_pull(skb, vlan_depth);
2712 
2713 	rcu_read_lock();
2714 	list_for_each_entry_rcu(ptype, &offload_base, list) {
2715 		if (ptype->type == type && ptype->callbacks.gso_segment) {
2716 			segs = ptype->callbacks.gso_segment(skb, features);
2717 			break;
2718 		}
2719 	}
2720 	rcu_read_unlock();
2721 
2722 	__skb_push(skb, skb->data - skb_mac_header(skb));
2723 
2724 	return segs;
2725 }
2726 EXPORT_SYMBOL(skb_mac_gso_segment);
2727 
2728 
2729 /* openvswitch calls this on rx path, so we need a different check.
2730  */
2731 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2732 {
2733 	if (tx_path)
2734 		return skb->ip_summed != CHECKSUM_PARTIAL;
2735 
2736 	return skb->ip_summed == CHECKSUM_NONE;
2737 }
2738 
2739 /**
2740  *	__skb_gso_segment - Perform segmentation on skb.
2741  *	@skb: buffer to segment
2742  *	@features: features for the output path (see dev->features)
2743  *	@tx_path: whether it is called in TX path
2744  *
2745  *	This function segments the given skb and returns a list of segments.
2746  *
2747  *	It may return NULL if the skb requires no segmentation.  This is
2748  *	only possible when GSO is used for verifying header integrity.
2749  *
2750  *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2751  */
2752 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2753 				  netdev_features_t features, bool tx_path)
2754 {
2755 	struct sk_buff *segs;
2756 
2757 	if (unlikely(skb_needs_check(skb, tx_path))) {
2758 		int err;
2759 
2760 		/* We're going to init ->check field in TCP or UDP header */
2761 		err = skb_cow_head(skb, 0);
2762 		if (err < 0)
2763 			return ERR_PTR(err);
2764 	}
2765 
2766 	/* Only report GSO partial support if it will enable us to
2767 	 * support segmentation on this frame without needing additional
2768 	 * work.
2769 	 */
2770 	if (features & NETIF_F_GSO_PARTIAL) {
2771 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2772 		struct net_device *dev = skb->dev;
2773 
2774 		partial_features |= dev->features & dev->gso_partial_features;
2775 		if (!skb_gso_ok(skb, features | partial_features))
2776 			features &= ~NETIF_F_GSO_PARTIAL;
2777 	}
2778 
2779 	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2780 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2781 
2782 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2783 	SKB_GSO_CB(skb)->encap_level = 0;
2784 
2785 	skb_reset_mac_header(skb);
2786 	skb_reset_mac_len(skb);
2787 
2788 	segs = skb_mac_gso_segment(skb, features);
2789 
2790 	if (unlikely(skb_needs_check(skb, tx_path)))
2791 		skb_warn_bad_offload(skb);
2792 
2793 	return segs;
2794 }
2795 EXPORT_SYMBOL(__skb_gso_segment);
2796 
2797 /* Take action when hardware reception checksum errors are detected. */
2798 #ifdef CONFIG_BUG
2799 void netdev_rx_csum_fault(struct net_device *dev)
2800 {
2801 	if (net_ratelimit()) {
2802 		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2803 		dump_stack();
2804 	}
2805 }
2806 EXPORT_SYMBOL(netdev_rx_csum_fault);
2807 #endif
2808 
2809 /* Actually, we should eliminate this check as soon as we know that:
2810  * 1. IOMMU is present and allows us to map all the memory.
2811  * 2. No high memory really exists on this machine.
2812  */
2813 
2814 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2815 {
2816 #ifdef CONFIG_HIGHMEM
2817 	int i;
2818 
2819 	if (!(dev->features & NETIF_F_HIGHDMA)) {
2820 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2821 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2822 
2823 			if (PageHighMem(skb_frag_page(frag)))
2824 				return 1;
2825 		}
2826 	}
2827 
2828 	if (PCI_DMA_BUS_IS_PHYS) {
2829 		struct device *pdev = dev->dev.parent;
2830 
2831 		if (!pdev)
2832 			return 0;
2833 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2834 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2835 			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2836 
2837 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2838 				return 1;
2839 		}
2840 	}
2841 #endif
2842 	return 0;
2843 }
2844 
2845 /* If MPLS offload request, verify we are testing hardware MPLS features
2846  * instead of standard features for the netdev.
2847  */
2848 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2849 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2850 					   netdev_features_t features,
2851 					   __be16 type)
2852 {
2853 	if (eth_p_mpls(type))
2854 		features &= skb->dev->mpls_features;
2855 
2856 	return features;
2857 }
2858 #else
2859 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2860 					   netdev_features_t features,
2861 					   __be16 type)
2862 {
2863 	return features;
2864 }
2865 #endif
2866 
2867 static netdev_features_t harmonize_features(struct sk_buff *skb,
2868 	netdev_features_t features)
2869 {
2870 	int tmp;
2871 	__be16 type;
2872 
2873 	type = skb_network_protocol(skb, &tmp);
2874 	features = net_mpls_features(skb, features, type);
2875 
2876 	if (skb->ip_summed != CHECKSUM_NONE &&
2877 	    !can_checksum_protocol(features, type)) {
2878 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2879 	}
2880 	if (illegal_highdma(skb->dev, skb))
2881 		features &= ~NETIF_F_SG;
2882 
2883 	return features;
2884 }
2885 
2886 netdev_features_t passthru_features_check(struct sk_buff *skb,
2887 					  struct net_device *dev,
2888 					  netdev_features_t features)
2889 {
2890 	return features;
2891 }
2892 EXPORT_SYMBOL(passthru_features_check);
2893 
2894 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2895 					     struct net_device *dev,
2896 					     netdev_features_t features)
2897 {
2898 	return vlan_features_check(skb, features);
2899 }
2900 
2901 static netdev_features_t gso_features_check(const struct sk_buff *skb,
2902 					    struct net_device *dev,
2903 					    netdev_features_t features)
2904 {
2905 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
2906 
2907 	if (gso_segs > dev->gso_max_segs)
2908 		return features & ~NETIF_F_GSO_MASK;
2909 
2910 	/* Support for GSO partial features requires software
2911 	 * intervention before we can actually process the packets
2912 	 * so we need to strip support for any partial features now
2913 	 * and we can pull them back in after we have partially
2914 	 * segmented the frame.
2915 	 */
2916 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2917 		features &= ~dev->gso_partial_features;
2918 
2919 	/* Make sure to clear the IPv4 ID mangling feature if the
2920 	 * IPv4 header has the potential to be fragmented.
2921 	 */
2922 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2923 		struct iphdr *iph = skb->encapsulation ?
2924 				    inner_ip_hdr(skb) : ip_hdr(skb);
2925 
2926 		if (!(iph->frag_off & htons(IP_DF)))
2927 			features &= ~NETIF_F_TSO_MANGLEID;
2928 	}
2929 
2930 	return features;
2931 }
2932 
2933 netdev_features_t netif_skb_features(struct sk_buff *skb)
2934 {
2935 	struct net_device *dev = skb->dev;
2936 	netdev_features_t features = dev->features;
2937 
2938 	if (skb_is_gso(skb))
2939 		features = gso_features_check(skb, dev, features);
2940 
2941 	/* If encapsulation offload request, verify we are testing
2942 	 * hardware encapsulation features instead of standard
2943 	 * features for the netdev
2944 	 */
2945 	if (skb->encapsulation)
2946 		features &= dev->hw_enc_features;
2947 
2948 	if (skb_vlan_tagged(skb))
2949 		features = netdev_intersect_features(features,
2950 						     dev->vlan_features |
2951 						     NETIF_F_HW_VLAN_CTAG_TX |
2952 						     NETIF_F_HW_VLAN_STAG_TX);
2953 
2954 	if (dev->netdev_ops->ndo_features_check)
2955 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
2956 								features);
2957 	else
2958 		features &= dflt_features_check(skb, dev, features);
2959 
2960 	return harmonize_features(skb, features);
2961 }
2962 EXPORT_SYMBOL(netif_skb_features);
2963 
2964 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2965 		    struct netdev_queue *txq, bool more)
2966 {
2967 	unsigned int len;
2968 	int rc;
2969 
2970 	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2971 		dev_queue_xmit_nit(skb, dev);
2972 
2973 	len = skb->len;
2974 	trace_net_dev_start_xmit(skb, dev);
2975 	rc = netdev_start_xmit(skb, dev, txq, more);
2976 	trace_net_dev_xmit(skb, rc, dev, len);
2977 
2978 	return rc;
2979 }
2980 
2981 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2982 				    struct netdev_queue *txq, int *ret)
2983 {
2984 	struct sk_buff *skb = first;
2985 	int rc = NETDEV_TX_OK;
2986 
2987 	while (skb) {
2988 		struct sk_buff *next = skb->next;
2989 
2990 		skb->next = NULL;
2991 		rc = xmit_one(skb, dev, txq, next != NULL);
2992 		if (unlikely(!dev_xmit_complete(rc))) {
2993 			skb->next = next;
2994 			goto out;
2995 		}
2996 
2997 		skb = next;
2998 		if (netif_xmit_stopped(txq) && skb) {
2999 			rc = NETDEV_TX_BUSY;
3000 			break;
3001 		}
3002 	}
3003 
3004 out:
3005 	*ret = rc;
3006 	return skb;
3007 }
3008 
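/*
 * If the device cannot offload the VLAN tag carried in the skb,
 * push the tag back into the packet data before transmission.
 */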
3009 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3010 					  netdev_features_t features)
3011 {
3012 	if (skb_vlan_tag_present(skb) &&
3013 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3014 		skb = __vlan_hwaccel_push_inside(skb);
3015 	return skb;
3016 }
3017 
3018 int skb_csum_hwoffload_help(struct sk_buff *skb,
3019 			    const netdev_features_t features)
3020 {
3021 	if (unlikely(skb->csum_not_inet))
3022 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3023 			skb_crc32c_csum_help(skb);
3024 
3025 	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3026 }
3027 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3028 
3029 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
3030 {
3031 	netdev_features_t features;
3032 
3033 	features = netif_skb_features(skb);
3034 	skb = validate_xmit_vlan(skb, features);
3035 	if (unlikely(!skb))
3036 		goto out_null;
3037 
3038 	if (netif_needs_gso(skb, features)) {
3039 		struct sk_buff *segs;
3040 
3041 		segs = skb_gso_segment(skb, features);
3042 		if (IS_ERR(segs)) {
3043 			goto out_kfree_skb;
3044 		} else if (segs) {
3045 			consume_skb(skb);
3046 			skb = segs;
3047 		}
3048 	} else {
3049 		if (skb_needs_linearize(skb, features) &&
3050 		    __skb_linearize(skb))
3051 			goto out_kfree_skb;
3052 
3053 		if (validate_xmit_xfrm(skb, features))
3054 			goto out_kfree_skb;
3055 
3056 		/* If packet is not checksummed and device does not
3057 		 * support checksumming for this protocol, complete
3058 		 * checksumming here.
3059 		 */
3060 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3061 			if (skb->encapsulation)
3062 				skb_set_inner_transport_header(skb,
3063 							       skb_checksum_start_offset(skb));
3064 			else
3065 				skb_set_transport_header(skb,
3066 							 skb_checksum_start_offset(skb));
3067 			if (skb_csum_hwoffload_help(skb, features))
3068 				goto out_kfree_skb;
3069 		}
3070 	}
3071 
3072 	return skb;
3073 
3074 out_kfree_skb:
3075 	kfree_skb(skb);
3076 out_null:
3077 	atomic_long_inc(&dev->tx_dropped);
3078 	return NULL;
3079 }
3080 
3081 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3082 {
3083 	struct sk_buff *next, *head = NULL, *tail;
3084 
3085 	for (; skb != NULL; skb = next) {
3086 		next = skb->next;
3087 		skb->next = NULL;
3088 
3089 		/* in case the skb won't be segmented, point to itself */
3090 		skb->prev = skb;
3091 
3092 		skb = validate_xmit_skb(skb, dev);
3093 		if (!skb)
3094 			continue;
3095 
3096 		if (!head)
3097 			head = skb;
3098 		else
3099 			tail->next = skb;
3100 		/* If skb was segmented, skb->prev points to
3101 		 * the last segment. If not, it still contains skb.
3102 		 */
3103 		tail = skb->prev;
3104 	}
3105 	return head;
3106 }
3107 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3108 
3109 static void qdisc_pkt_len_init(struct sk_buff *skb)
3110 {
3111 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3112 
3113 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3114 
3115 	/* To get a more precise estimate of bytes sent on the wire,
3116 	 * we add to pkt_len the header size of all segments
3117 	 */
3118 	if (shinfo->gso_size)  {
3119 		unsigned int hdr_len;
3120 		u16 gso_segs = shinfo->gso_segs;
3121 
3122 		/* mac layer + network layer */
3123 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3124 
3125 		/* + transport layer */
3126 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3127 			hdr_len += tcp_hdrlen(skb);
3128 		else
3129 			hdr_len += sizeof(struct udphdr);
3130 
3131 		if (shinfo->gso_type & SKB_GSO_DODGY)
3132 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3133 						shinfo->gso_size);
3134 
3135 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3136 	}
3137 }
3138 
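/*
 * Core qdisc transmit path used by __dev_queue_xmit(): either enqueue
 * the skb on @q or, when the qdisc is empty and allows bypass
 * (TCQ_F_CAN_BYPASS), transmit it directly. Returns a NET_XMIT_* code.
 */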
3139 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3140 				 struct net_device *dev,
3141 				 struct netdev_queue *txq)
3142 {
3143 	spinlock_t *root_lock = qdisc_lock(q);
3144 	struct sk_buff *to_free = NULL;
3145 	bool contended;
3146 	int rc;
3147 
3148 	qdisc_calculate_pkt_len(skb, q);
3149 	/*
3150 	 * Heuristic to force contended enqueues to serialize on a
3151 	 * separate lock before trying to get qdisc main lock.
3152 	 * This permits qdisc->running owner to get the lock more
3153 	 * often and dequeue packets faster.
3154 	 */
3155 	contended = qdisc_is_running(q);
3156 	if (unlikely(contended))
3157 		spin_lock(&q->busylock);
3158 
3159 	spin_lock(root_lock);
3160 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3161 		__qdisc_drop(skb, &to_free);
3162 		rc = NET_XMIT_DROP;
3163 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3164 		   qdisc_run_begin(q)) {
3165 		/*
3166 		 * This is a work-conserving queue; there are no old skbs
3167 		 * waiting to be sent out; and the qdisc is not running -
3168 		 * xmit the skb directly.
3169 		 */
3170 
3171 		qdisc_bstats_update(q, skb);
3172 
3173 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3174 			if (unlikely(contended)) {
3175 				spin_unlock(&q->busylock);
3176 				contended = false;
3177 			}
3178 			__qdisc_run(q);
3179 		} else
3180 			qdisc_run_end(q);
3181 
3182 		rc = NET_XMIT_SUCCESS;
3183 	} else {
3184 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3185 		if (qdisc_run_begin(q)) {
3186 			if (unlikely(contended)) {
3187 				spin_unlock(&q->busylock);
3188 				contended = false;
3189 			}
3190 			__qdisc_run(q);
3191 		}
3192 	}
3193 	spin_unlock(root_lock);
3194 	if (unlikely(to_free))
3195 		kfree_skb_list(to_free);
3196 	if (unlikely(contended))
3197 		spin_unlock(&q->busylock);
3198 	return rc;
3199 }
3200 
3201 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3202 static void skb_update_prio(struct sk_buff *skb)
3203 {
3204 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
3205 
3206 	if (!skb->priority && skb->sk && map) {
3207 		unsigned int prioidx =
3208 			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
3209 
3210 		if (prioidx < map->priomap_len)
3211 			skb->priority = map->priomap[prioidx];
3212 	}
3213 }
3214 #else
3215 #define skb_update_prio(skb)
3216 #endif
3217 
3218 DEFINE_PER_CPU(int, xmit_recursion);
3219 EXPORT_SYMBOL(xmit_recursion);
3220 
3221 /**
3222  *	dev_loopback_xmit - loop back @skb
3223  *	@net: network namespace this loopback is happening in
3224  *	@sk:  sk needed to be a netfilter okfn
3225  *	@skb: buffer to transmit
3226  */
3227 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3228 {
3229 	skb_reset_mac_header(skb);
3230 	__skb_pull(skb, skb_network_offset(skb));
3231 	skb->pkt_type = PACKET_LOOPBACK;
3232 	skb->ip_summed = CHECKSUM_UNNECESSARY;
3233 	WARN_ON(!skb_dst(skb));
3234 	skb_dst_force(skb);
3235 	netif_rx_ni(skb);
3236 	return 0;
3237 }
3238 EXPORT_SYMBOL(dev_loopback_xmit);
3239 
3240 #ifdef CONFIG_NET_EGRESS
3241 static struct sk_buff *
3242 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3243 {
3244 	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3245 	struct tcf_result cl_res;
3246 
3247 	if (!cl)
3248 		return skb;
3249 
3250 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3251 	qdisc_bstats_cpu_update(cl->q, skb);
3252 
3253 	switch (tcf_classify(skb, cl, &cl_res, false)) {
3254 	case TC_ACT_OK:
3255 	case TC_ACT_RECLASSIFY:
3256 		skb->tc_index = TC_H_MIN(cl_res.classid);
3257 		break;
3258 	case TC_ACT_SHOT:
3259 		qdisc_qstats_cpu_drop(cl->q);
3260 		*ret = NET_XMIT_DROP;
3261 		kfree_skb(skb);
3262 		return NULL;
3263 	case TC_ACT_STOLEN:
3264 	case TC_ACT_QUEUED:
3265 	case TC_ACT_TRAP:
3266 		*ret = NET_XMIT_SUCCESS;
3267 		consume_skb(skb);
3268 		return NULL;
3269 	case TC_ACT_REDIRECT:
3270 		/* No need to push/pop skb's mac_header here on egress! */
3271 		skb_do_redirect(skb);
3272 		*ret = NET_XMIT_SUCCESS;
3273 		return NULL;
3274 	default:
3275 		break;
3276 	}
3277 
3278 	return skb;
3279 }
3280 #endif /* CONFIG_NET_EGRESS */
3281 
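/*
 * XPS queue lookup: use the sending CPU recorded in skb->sender_cpu
 * (offset by one) to find that CPU's map for the skb's traffic class
 * and pick a queue from it, hashing when more than one queue is
 * mapped. Returns -1 when no usable mapping exists.
 */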
3282 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3283 {
3284 #ifdef CONFIG_XPS
3285 	struct xps_dev_maps *dev_maps;
3286 	struct xps_map *map;
3287 	int queue_index = -1;
3288 
3289 	rcu_read_lock();
3290 	dev_maps = rcu_dereference(dev->xps_maps);
3291 	if (dev_maps) {
3292 		unsigned int tci = skb->sender_cpu - 1;
3293 
3294 		if (dev->num_tc) {
3295 			tci *= dev->num_tc;
3296 			tci += netdev_get_prio_tc_map(dev, skb->priority);
3297 		}
3298 
3299 		map = rcu_dereference(dev_maps->cpu_map[tci]);
3300 		if (map) {
3301 			if (map->len == 1)
3302 				queue_index = map->queues[0];
3303 			else
3304 				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3305 									   map->len)];
3306 			if (unlikely(queue_index >= dev->real_num_tx_queues))
3307 				queue_index = -1;
3308 		}
3309 	}
3310 	rcu_read_unlock();
3311 
3312 	return queue_index;
3313 #else
3314 	return -1;
3315 #endif
3316 }
3317 
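/*
 * Default tx queue selection: reuse the queue cached on the socket
 * unless it is stale or the skb allows re-steering (ooo_okay);
 * otherwise consult XPS and fall back to skb_tx_hash(). The result is
 * cached back on full sockets that have a cached destination.
 */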
3318 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3319 {
3320 	struct sock *sk = skb->sk;
3321 	int queue_index = sk_tx_queue_get(sk);
3322 
3323 	if (queue_index < 0 || skb->ooo_okay ||
3324 	    queue_index >= dev->real_num_tx_queues) {
3325 		int new_index = get_xps_queue(dev, skb);
3326 
3327 		if (new_index < 0)
3328 			new_index = skb_tx_hash(dev, skb);
3329 
3330 		if (queue_index != new_index && sk &&
3331 		    sk_fullsock(sk) &&
3332 		    rcu_access_pointer(sk->sk_dst_cache))
3333 			sk_tx_queue_set(sk, new_index);
3334 
3335 		queue_index = new_index;
3336 	}
3337 
3338 	return queue_index;
3339 }
3340 
3341 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3342 				    struct sk_buff *skb,
3343 				    void *accel_priv)
3344 {
3345 	int queue_index = 0;
3346 
3347 #ifdef CONFIG_XPS
3348 	u32 sender_cpu = skb->sender_cpu - 1;
3349 
3350 	if (sender_cpu >= (u32)NR_CPUS)
3351 		skb->sender_cpu = raw_smp_processor_id() + 1;
3352 #endif
3353 
3354 	if (dev->real_num_tx_queues != 1) {
3355 		const struct net_device_ops *ops = dev->netdev_ops;
3356 
3357 		if (ops->ndo_select_queue)
3358 			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3359 							    __netdev_pick_tx);
3360 		else
3361 			queue_index = __netdev_pick_tx(dev, skb);
3362 
3363 		if (!accel_priv)
3364 			queue_index = netdev_cap_txqueue(dev, queue_index);
3365 	}
3366 
3367 	skb_set_queue_mapping(skb, queue_index);
3368 	return netdev_get_tx_queue(dev, queue_index);
3369 }
3370 
3371 /**
3372  *	__dev_queue_xmit - transmit a buffer
3373  *	@skb: buffer to transmit
3374  *	@accel_priv: private data used for L2 forwarding offload
3375  *
3376  *	Queue a buffer for transmission to a network device. The caller must
3377  *	have set the device and priority and built the buffer before calling
3378  *	this function. The function can be called from an interrupt.
3379  *
3380  *	A negative errno code is returned on a failure. A success does not
3381  *	guarantee the frame will be transmitted as it may be dropped due
3382  *	to congestion or traffic shaping.
3383  *
3384  * -----------------------------------------------------------------------------------
3385  *      I notice this method can also return errors from the queue disciplines,
3386  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
3387  *      be positive.
3388  *
3389  *      Regardless of the return value, the skb is consumed, so it is currently
3390  *      difficult to retry a send to this method.  (You can bump the ref count
3391  *      before sending to hold a reference for retry if you are careful.)
3392  *
3393  *      When calling this method, interrupts MUST be enabled.  This is because
3394  *      the BH enable code must have IRQs enabled so that it will not deadlock.
3395  *          --BLG
3396  */
3397 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3398 {
3399 	struct net_device *dev = skb->dev;
3400 	struct netdev_queue *txq;
3401 	struct Qdisc *q;
3402 	int rc = -ENOMEM;
3403 
3404 	skb_reset_mac_header(skb);
3405 
3406 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3407 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3408 
3409 	/* Disable soft irqs for various locks below. Also
3410 	 * stops preemption for RCU.
3411 	 */
3412 	rcu_read_lock_bh();
3413 
3414 	skb_update_prio(skb);
3415 
3416 	qdisc_pkt_len_init(skb);
3417 #ifdef CONFIG_NET_CLS_ACT
3418 	skb->tc_at_ingress = 0;
3419 # ifdef CONFIG_NET_EGRESS
3420 	if (static_key_false(&egress_needed)) {
3421 		skb = sch_handle_egress(skb, &rc, dev);
3422 		if (!skb)
3423 			goto out;
3424 	}
3425 # endif
3426 #endif
3427 	/* If device/qdisc don't need skb->dst, release it right now while
3428 	 * it's hot in this CPU's cache.
3429 	 */
3430 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3431 		skb_dst_drop(skb);
3432 	else
3433 		skb_dst_force(skb);
3434 
3435 	txq = netdev_pick_tx(dev, skb, accel_priv);
3436 	q = rcu_dereference_bh(txq->qdisc);
3437 
3438 	trace_net_dev_queue(skb);
3439 	if (q->enqueue) {
3440 		rc = __dev_xmit_skb(skb, q, dev, txq);
3441 		goto out;
3442 	}
3443 
3444 	/* The device has no queue. Common case for software devices:
3445 	 * loopback, all sorts of tunnels...
3446 	 *
3447 	 * Really, it is unlikely that netif_tx_lock protection is necessary
3448 	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
3449 	 * counters.)
3450 	 * However, it is possible that they rely on protection
3451 	 * made by us here.
3452 	 *
3453 	 * Check this and take the lock. It is not prone to deadlocks.
3454 	 * Or take the noqueue qdisc path, it is even simpler 8)
3455 	 */
3456 	if (dev->flags & IFF_UP) {
3457 		int cpu = smp_processor_id(); /* ok because BHs are off */
3458 
3459 		if (txq->xmit_lock_owner != cpu) {
3460 			if (unlikely(__this_cpu_read(xmit_recursion) >
3461 				     XMIT_RECURSION_LIMIT))
3462 				goto recursion_alert;
3463 
3464 			skb = validate_xmit_skb(skb, dev);
3465 			if (!skb)
3466 				goto out;
3467 
3468 			HARD_TX_LOCK(dev, txq, cpu);
3469 
3470 			if (!netif_xmit_stopped(txq)) {
3471 				__this_cpu_inc(xmit_recursion);
3472 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3473 				__this_cpu_dec(xmit_recursion);
3474 				if (dev_xmit_complete(rc)) {
3475 					HARD_TX_UNLOCK(dev, txq);
3476 					goto out;
3477 				}
3478 			}
3479 			HARD_TX_UNLOCK(dev, txq);
3480 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3481 					     dev->name);
3482 		} else {
3483 			/* Recursion is detected! It is possible,
3484 			 * unfortunately
3485 			 */
3486 recursion_alert:
3487 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3488 					     dev->name);
3489 		}
3490 	}
3491 
3492 	rc = -ENETDOWN;
3493 	rcu_read_unlock_bh();
3494 
3495 	atomic_long_inc(&dev->tx_dropped);
3496 	kfree_skb_list(skb);
3497 	return rc;
3498 out:
3499 	rcu_read_unlock_bh();
3500 	return rc;
3501 }
3502 
3503 int dev_queue_xmit(struct sk_buff *skb)
3504 {
3505 	return __dev_queue_xmit(skb, NULL);
3506 }
3507 EXPORT_SYMBOL(dev_queue_xmit);
3508 
3509 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3510 {
3511 	return __dev_queue_xmit(skb, accel_priv);
3512 }
3513 EXPORT_SYMBOL(dev_queue_xmit_accel);
3514 
3515 
3516 /*************************************************************************
3517  *			Receiver routines
3518  *************************************************************************/
3519 
3520 int netdev_max_backlog __read_mostly = 1000;
3521 EXPORT_SYMBOL(netdev_max_backlog);
3522 
3523 int netdev_tstamp_prequeue __read_mostly = 1;
3524 int netdev_budget __read_mostly = 300;
3525 unsigned int __read_mostly netdev_budget_usecs = 2000;
3526 int weight_p __read_mostly = 64;           /* old backlog weight */
3527 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
3528 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
3529 int dev_rx_weight __read_mostly = 64;
3530 int dev_tx_weight __read_mostly = 64;
3531 
3532 /* Called with irq disabled */
3533 static inline void ____napi_schedule(struct softnet_data *sd,
3534 				     struct napi_struct *napi)
3535 {
3536 	list_add_tail(&napi->poll_list, &sd->poll_list);
3537 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3538 }
3539 
3540 #ifdef CONFIG_RPS
3541 
3542 /* One global table that all flow-based protocols share. */
3543 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3544 EXPORT_SYMBOL(rps_sock_flow_table);
3545 u32 rps_cpu_mask __read_mostly;
3546 EXPORT_SYMBOL(rps_cpu_mask);
3547 
3548 struct static_key rps_needed __read_mostly;
3549 EXPORT_SYMBOL(rps_needed);
3550 struct static_key rfs_needed __read_mostly;
3551 EXPORT_SYMBOL(rfs_needed);
3552 
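/*
 * Record @next_cpu as the flow's target. With RFS acceleration the
 * driver is also asked (via ndo_rx_flow_steer) to steer the hardware
 * flow to an rx queue associated with that CPU, and the old flow
 * entry's filter is invalidated if it pointed at the same filter id.
 */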
3553 static struct rps_dev_flow *
3554 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3555 	    struct rps_dev_flow *rflow, u16 next_cpu)
3556 {
3557 	if (next_cpu < nr_cpu_ids) {
3558 #ifdef CONFIG_RFS_ACCEL
3559 		struct netdev_rx_queue *rxqueue;
3560 		struct rps_dev_flow_table *flow_table;
3561 		struct rps_dev_flow *old_rflow;
3562 		u32 flow_id;
3563 		u16 rxq_index;
3564 		int rc;
3565 
3566 		/* Should we steer this flow to a different hardware queue? */
3567 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3568 		    !(dev->features & NETIF_F_NTUPLE))
3569 			goto out;
3570 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3571 		if (rxq_index == skb_get_rx_queue(skb))
3572 			goto out;
3573 
3574 		rxqueue = dev->_rx + rxq_index;
3575 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
3576 		if (!flow_table)
3577 			goto out;
3578 		flow_id = skb_get_hash(skb) & flow_table->mask;
3579 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3580 							rxq_index, flow_id);
3581 		if (rc < 0)
3582 			goto out;
3583 		old_rflow = rflow;
3584 		rflow = &flow_table->flows[flow_id];
3585 		rflow->filter = rc;
3586 		if (old_rflow->filter == rflow->filter)
3587 			old_rflow->filter = RPS_NO_FILTER;
3588 	out:
3589 #endif
3590 		rflow->last_qtail =
3591 			per_cpu(softnet_data, next_cpu).input_queue_head;
3592 	}
3593 
3594 	rflow->cpu = next_cpu;
3595 	return rflow;
3596 }
3597 
3598 /*
3599  * get_rps_cpu is called from netif_receive_skb and returns the target
3600  * CPU from the RPS map of the receiving queue for a given skb.
3601  * rcu_read_lock must be held on entry.
3602  */
3603 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3604 		       struct rps_dev_flow **rflowp)
3605 {
3606 	const struct rps_sock_flow_table *sock_flow_table;
3607 	struct netdev_rx_queue *rxqueue = dev->_rx;
3608 	struct rps_dev_flow_table *flow_table;
3609 	struct rps_map *map;
3610 	int cpu = -1;
3611 	u32 tcpu;
3612 	u32 hash;
3613 
3614 	if (skb_rx_queue_recorded(skb)) {
3615 		u16 index = skb_get_rx_queue(skb);
3616 
3617 		if (unlikely(index >= dev->real_num_rx_queues)) {
3618 			WARN_ONCE(dev->real_num_rx_queues > 1,
3619 				  "%s received packet on queue %u, but number "
3620 				  "of RX queues is %u\n",
3621 				  dev->name, index, dev->real_num_rx_queues);
3622 			goto done;
3623 		}
3624 		rxqueue += index;
3625 	}
3626 
3627 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3628 
3629 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
3630 	map = rcu_dereference(rxqueue->rps_map);
3631 	if (!flow_table && !map)
3632 		goto done;
3633 
3634 	skb_reset_network_header(skb);
3635 	hash = skb_get_hash(skb);
3636 	if (!hash)
3637 		goto done;
3638 
3639 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
3640 	if (flow_table && sock_flow_table) {
3641 		struct rps_dev_flow *rflow;
3642 		u32 next_cpu;
3643 		u32 ident;
3644 
3645 		/* First check into global flow table if there is a match */
3646 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3647 		if ((ident ^ hash) & ~rps_cpu_mask)
3648 			goto try_rps;
3649 
3650 		next_cpu = ident & rps_cpu_mask;
3651 
3652 		/* OK, now we know there is a match,
3653 		 * we can look at the local (per receive queue) flow table
3654 		 */
3655 		rflow = &flow_table->flows[hash & flow_table->mask];
3656 		tcpu = rflow->cpu;
3657 
3658 		/*
3659 		 * If the desired CPU (where last recvmsg was done) is
3660 		 * different from current CPU (one in the rx-queue flow
3661 		 * table entry), switch if one of the following holds:
3662 		 *   - Current CPU is unset (>= nr_cpu_ids).
3663 		 *   - Current CPU is offline.
3664 		 *   - The current CPU's queue tail has advanced beyond the
3665 		 *     last packet that was enqueued using this table entry.
3666 		 *     This guarantees that all previous packets for the flow
3667 		 *     have been dequeued, thus preserving in order delivery.
3668 		 */
3669 		if (unlikely(tcpu != next_cpu) &&
3670 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3671 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3672 		      rflow->last_qtail)) >= 0)) {
3673 			tcpu = next_cpu;
3674 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3675 		}
3676 
3677 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3678 			*rflowp = rflow;
3679 			cpu = tcpu;
3680 			goto done;
3681 		}
3682 	}
3683 
3684 try_rps:
3685 
3686 	if (map) {
3687 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3688 		if (cpu_online(tcpu)) {
3689 			cpu = tcpu;
3690 			goto done;
3691 		}
3692 	}
3693 
3694 done:
3695 	return cpu;
3696 }
3697 
3698 #ifdef CONFIG_RFS_ACCEL
3699 
3700 /**
3701  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3702  * @dev: Device on which the filter was set
3703  * @rxq_index: RX queue index
3704  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3705  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3706  *
3707  * Drivers that implement ndo_rx_flow_steer() should periodically call
3708  * this function for each installed filter and remove the filters for
3709  * which it returns %true.
3710  */
3711 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3712 			 u32 flow_id, u16 filter_id)
3713 {
3714 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3715 	struct rps_dev_flow_table *flow_table;
3716 	struct rps_dev_flow *rflow;
3717 	bool expire = true;
3718 	unsigned int cpu;
3719 
3720 	rcu_read_lock();
3721 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
3722 	if (flow_table && flow_id <= flow_table->mask) {
3723 		rflow = &flow_table->flows[flow_id];
3724 		cpu = ACCESS_ONCE(rflow->cpu);
3725 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3726 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3727 			   rflow->last_qtail) <
3728 		     (int)(10 * flow_table->mask)))
3729 			expire = false;
3730 	}
3731 	rcu_read_unlock();
3732 	return expire;
3733 }
3734 EXPORT_SYMBOL(rps_may_expire_flow);
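/* Illustrative driver-side usage (a sketch only; my_filter and
 * my_remove_hw_filter are hypothetical driver-private names):
 *
 *	for (i = 0; i < priv->num_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			my_remove_hw_filter(priv, f);
 *	}
 */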
3735 
3736 #endif /* CONFIG_RFS_ACCEL */
3737 
3738 /* Called from hardirq (IPI) context */
3739 static void rps_trigger_softirq(void *data)
3740 {
3741 	struct softnet_data *sd = data;
3742 
3743 	____napi_schedule(sd, &sd->backlog);
3744 	sd->received_rps++;
3745 }
3746 
3747 #endif /* CONFIG_RPS */
3748 
3749 /*
3750  * Check if this softnet_data structure belongs to another CPU
3751  * If yes, queue it to our IPI list and return 1
3752  * If no, return 0
3753  */
3754 static int rps_ipi_queued(struct softnet_data *sd)
3755 {
3756 #ifdef CONFIG_RPS
3757 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3758 
3759 	if (sd != mysd) {
3760 		sd->rps_ipi_next = mysd->rps_ipi_list;
3761 		mysd->rps_ipi_list = sd;
3762 
3763 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
3764 		return 1;
3765 	}
3766 #endif /* CONFIG_RPS */
3767 	return 0;
3768 }
3769 
3770 #ifdef CONFIG_NET_FLOW_LIMIT
3771 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3772 #endif
3773 
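/* Heuristic used by skb_flow_limit() below: hash each enqueued packet into
 * a small bucket array and keep a rolling history of the last
 * FLOW_LIMIT_HISTORY enqueues.  Once the backlog is at least half full, a
 * packet is dropped when its flow alone accounts for more than half of that
 * history, so a single flow cannot monopolise the per-CPU backlog queue.
 */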
3774 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3775 {
3776 #ifdef CONFIG_NET_FLOW_LIMIT
3777 	struct sd_flow_limit *fl;
3778 	struct softnet_data *sd;
3779 	unsigned int old_flow, new_flow;
3780 
3781 	if (qlen < (netdev_max_backlog >> 1))
3782 		return false;
3783 
3784 	sd = this_cpu_ptr(&softnet_data);
3785 
3786 	rcu_read_lock();
3787 	fl = rcu_dereference(sd->flow_limit);
3788 	if (fl) {
3789 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3790 		old_flow = fl->history[fl->history_head];
3791 		fl->history[fl->history_head] = new_flow;
3792 
3793 		fl->history_head++;
3794 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3795 
3796 		if (likely(fl->buckets[old_flow]))
3797 			fl->buckets[old_flow]--;
3798 
3799 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3800 			fl->count++;
3801 			rcu_read_unlock();
3802 			return true;
3803 		}
3804 	}
3805 	rcu_read_unlock();
3806 #endif
3807 	return false;
3808 }
3809 
3810 /*
3811  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3812  * queue (may be a remote CPU queue).
3813  */
3814 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3815 			      unsigned int *qtail)
3816 {
3817 	struct softnet_data *sd;
3818 	unsigned long flags;
3819 	unsigned int qlen;
3820 
3821 	sd = &per_cpu(softnet_data, cpu);
3822 
3823 	local_irq_save(flags);
3824 
3825 	rps_lock(sd);
3826 	if (!netif_running(skb->dev))
3827 		goto drop;
3828 	qlen = skb_queue_len(&sd->input_pkt_queue);
3829 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3830 		if (qlen) {
3831 enqueue:
3832 			__skb_queue_tail(&sd->input_pkt_queue, skb);
3833 			input_queue_tail_incr_save(sd, qtail);
3834 			rps_unlock(sd);
3835 			local_irq_restore(flags);
3836 			return NET_RX_SUCCESS;
3837 		}
3838 
3839 		/* Schedule NAPI for backlog device
3840 		 * We can use a non-atomic operation since we own the queue lock
3841 		 */
3842 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3843 			if (!rps_ipi_queued(sd))
3844 				____napi_schedule(sd, &sd->backlog);
3845 		}
3846 		goto enqueue;
3847 	}
3848 
3849 drop:
3850 	sd->dropped++;
3851 	rps_unlock(sd);
3852 
3853 	local_irq_restore(flags);
3854 
3855 	atomic_long_inc(&skb->dev->rx_dropped);
3856 	kfree_skb(skb);
3857 	return NET_RX_DROP;
3858 }
3859 
3860 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3861 				     struct bpf_prog *xdp_prog)
3862 {
3863 	struct xdp_buff xdp;
3864 	u32 act = XDP_DROP;
3865 	void *orig_data;
3866 	int hlen, off;
3867 	u32 mac_len;
3868 
3869 	/* Reinjected packets coming from act_mirred or similar should
3870 	 * not get XDP generic processing.
3871 	 */
3872 	if (skb_cloned(skb))
3873 		return XDP_PASS;
3874 
3875 	if (skb_linearize(skb))
3876 		goto do_drop;
3877 
3878 	/* The XDP program wants to see the packet starting at the MAC
3879 	 * header.
3880 	 */
3881 	mac_len = skb->data - skb_mac_header(skb);
3882 	hlen = skb_headlen(skb) + mac_len;
3883 	xdp.data = skb->data - mac_len;
3884 	xdp.data_end = xdp.data + hlen;
3885 	xdp.data_hard_start = skb->data - skb_headroom(skb);
3886 	orig_data = xdp.data;
3887 
3888 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
3889 
3890 	off = xdp.data - orig_data;
3891 	if (off > 0)
3892 		__skb_pull(skb, off);
3893 	else if (off < 0)
3894 		__skb_push(skb, -off);
3895 
3896 	switch (act) {
3897 	case XDP_REDIRECT:
3898 	case XDP_TX:
3899 		__skb_push(skb, mac_len);
3900 		/* fall through */
3901 	case XDP_PASS:
3902 		break;
3903 
3904 	default:
3905 		bpf_warn_invalid_xdp_action(act);
3906 		/* fall through */
3907 	case XDP_ABORTED:
3908 		trace_xdp_exception(skb->dev, xdp_prog, act);
3909 		/* fall through */
3910 	case XDP_DROP:
3911 	do_drop:
3912 		kfree_skb(skb);
3913 		break;
3914 	}
3915 
3916 	return act;
3917 }
3918 
3919 /* When doing generic XDP we have to bypass the qdisc layer and the
3920  * network taps in order to match in-driver-XDP behavior.
3921  */
3922 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
3923 {
3924 	struct net_device *dev = skb->dev;
3925 	struct netdev_queue *txq;
3926 	bool free_skb = true;
3927 	int cpu, rc;
3928 
3929 	txq = netdev_pick_tx(dev, skb, NULL);
3930 	cpu = smp_processor_id();
3931 	HARD_TX_LOCK(dev, txq, cpu);
3932 	if (!netif_xmit_stopped(txq)) {
3933 		rc = netdev_start_xmit(skb, dev, txq, 0);
3934 		if (dev_xmit_complete(rc))
3935 			free_skb = false;
3936 	}
3937 	HARD_TX_UNLOCK(dev, txq);
3938 	if (free_skb) {
3939 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
3940 		kfree_skb(skb);
3941 	}
3942 }
3943 EXPORT_SYMBOL_GPL(generic_xdp_tx);
3944 
3945 static struct static_key generic_xdp_needed __read_mostly;
3946 
3947 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
3948 {
3949 	if (xdp_prog) {
3950 		u32 act = netif_receive_generic_xdp(skb, xdp_prog);
3951 		int err;
3952 
3953 		if (act != XDP_PASS) {
3954 			switch (act) {
3955 			case XDP_REDIRECT:
3956 				err = xdp_do_generic_redirect(skb->dev, skb,
3957 							      xdp_prog);
3958 				if (err)
3959 					goto out_redir;
3960 			/* fallthru to submit skb */
3961 			case XDP_TX:
3962 				generic_xdp_tx(skb, xdp_prog);
3963 				break;
3964 			}
3965 			return XDP_DROP;
3966 		}
3967 	}
3968 	return XDP_PASS;
3969 out_redir:
3970 	kfree_skb(skb);
3971 	return XDP_DROP;
3972 }
3973 EXPORT_SYMBOL_GPL(do_xdp_generic);
3974 
3975 static int netif_rx_internal(struct sk_buff *skb)
3976 {
3977 	int ret;
3978 
3979 	net_timestamp_check(netdev_tstamp_prequeue, skb);
3980 
3981 	trace_netif_rx(skb);
3982 
3983 	if (static_key_false(&generic_xdp_needed)) {
3984 		int ret;
3985 
3986 		preempt_disable();
3987 		rcu_read_lock();
3988 		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
3989 		rcu_read_unlock();
3990 		preempt_enable();
3991 
3992 		/* Consider XDP consuming the packet a success from
3993 		 * the netdev point of view; we do not want to count
3994 		 * this as an error.
3995 		 */
3996 		if (ret != XDP_PASS)
3997 			return NET_RX_SUCCESS;
3998 	}
3999 
4000 #ifdef CONFIG_RPS
4001 	if (static_key_false(&rps_needed)) {
4002 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4003 		int cpu;
4004 
4005 		preempt_disable();
4006 		rcu_read_lock();
4007 
4008 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4009 		if (cpu < 0)
4010 			cpu = smp_processor_id();
4011 
4012 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4013 
4014 		rcu_read_unlock();
4015 		preempt_enable();
4016 	} else
4017 #endif
4018 	{
4019 		unsigned int qtail;
4020 
4021 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4022 		put_cpu();
4023 	}
4024 	return ret;
4025 }
4026 
4027 /**
4028  *	netif_rx	-	post buffer to the network code
4029  *	@skb: buffer to post
4030  *
4031  *	This function receives a packet from a device driver and queues it for
4032  *	the upper (protocol) levels to process.  It always succeeds. The buffer
4033  *	may be dropped during processing for congestion control or by the
4034  *	protocol layers.
4035  *
4036  *	return values:
4037  *	NET_RX_SUCCESS	(no congestion)
4038  *	NET_RX_DROP     (packet was dropped)
4039  *
4040  */
4041 
4042 int netif_rx(struct sk_buff *skb)
4043 {
4044 	trace_netif_rx_entry(skb);
4045 
4046 	return netif_rx_internal(skb);
4047 }
4048 EXPORT_SYMBOL(netif_rx);
4049 
4050 int netif_rx_ni(struct sk_buff *skb)
4051 {
4052 	int err;
4053 
4054 	trace_netif_rx_ni_entry(skb);
4055 
4056 	preempt_disable();
4057 	err = netif_rx_internal(skb);
4058 	if (local_softirq_pending())
4059 		do_softirq();
4060 	preempt_enable();
4061 
4062 	return err;
4063 }
4064 EXPORT_SYMBOL(netif_rx_ni);
4065 
4066 static __latent_entropy void net_tx_action(struct softirq_action *h)
4067 {
4068 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4069 
4070 	if (sd->completion_queue) {
4071 		struct sk_buff *clist;
4072 
4073 		local_irq_disable();
4074 		clist = sd->completion_queue;
4075 		sd->completion_queue = NULL;
4076 		local_irq_enable();
4077 
4078 		while (clist) {
4079 			struct sk_buff *skb = clist;
4080 
4081 			clist = clist->next;
4082 
4083 			WARN_ON(refcount_read(&skb->users));
4084 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4085 				trace_consume_skb(skb);
4086 			else
4087 				trace_kfree_skb(skb, net_tx_action);
4088 
4089 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4090 				__kfree_skb(skb);
4091 			else
4092 				__kfree_skb_defer(skb);
4093 		}
4094 
4095 		__kfree_skb_flush();
4096 	}
4097 
4098 	if (sd->output_queue) {
4099 		struct Qdisc *head;
4100 
4101 		local_irq_disable();
4102 		head = sd->output_queue;
4103 		sd->output_queue = NULL;
4104 		sd->output_queue_tailp = &sd->output_queue;
4105 		local_irq_enable();
4106 
4107 		while (head) {
4108 			struct Qdisc *q = head;
4109 			spinlock_t *root_lock;
4110 
4111 			head = head->next_sched;
4112 
4113 			root_lock = qdisc_lock(q);
4114 			spin_lock(root_lock);
4115 			/* We need to make sure head->next_sched is read
4116 			 * before clearing __QDISC_STATE_SCHED
4117 			 */
4118 			smp_mb__before_atomic();
4119 			clear_bit(__QDISC_STATE_SCHED, &q->state);
4120 			qdisc_run(q);
4121 			spin_unlock(root_lock);
4122 		}
4123 	}
4124 }
4125 
4126 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4127 /* This hook is defined here for ATM LANE */
4128 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4129 			     unsigned char *addr) __read_mostly;
4130 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4131 #endif
4132 
4133 static inline struct sk_buff *
4134 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4135 		   struct net_device *orig_dev)
4136 {
4137 #ifdef CONFIG_NET_CLS_ACT
4138 	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
4139 	struct tcf_result cl_res;
4140 
4141 	/* If there's at least one ingress present somewhere (so
4142 	 * we get here via enabled static key), remaining devices
4143 	 * that are not configured with an ingress qdisc will bail
4144 	 * out here.
4145 	 */
4146 	if (!cl)
4147 		return skb;
4148 	if (*pt_prev) {
4149 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
4150 		*pt_prev = NULL;
4151 	}
4152 
4153 	qdisc_skb_cb(skb)->pkt_len = skb->len;
4154 	skb->tc_at_ingress = 1;
4155 	qdisc_bstats_cpu_update(cl->q, skb);
4156 
4157 	switch (tcf_classify(skb, cl, &cl_res, false)) {
4158 	case TC_ACT_OK:
4159 	case TC_ACT_RECLASSIFY:
4160 		skb->tc_index = TC_H_MIN(cl_res.classid);
4161 		break;
4162 	case TC_ACT_SHOT:
4163 		qdisc_qstats_cpu_drop(cl->q);
4164 		kfree_skb(skb);
4165 		return NULL;
4166 	case TC_ACT_STOLEN:
4167 	case TC_ACT_QUEUED:
4168 	case TC_ACT_TRAP:
4169 		consume_skb(skb);
4170 		return NULL;
4171 	case TC_ACT_REDIRECT:
4172 		/* skb_mac_header check was done by cls/act_bpf, so
4173 		 * we can safely push the L2 header back before
4174 		 * redirecting to another netdev
4175 		 */
4176 		__skb_push(skb, skb->mac_len);
4177 		skb_do_redirect(skb);
4178 		return NULL;
4179 	default:
4180 		break;
4181 	}
4182 #endif /* CONFIG_NET_CLS_ACT */
4183 	return skb;
4184 }
4185 
4186 /**
4187  *	netdev_is_rx_handler_busy - check if receive handler is registered
4188  *	@dev: device to check
4189  *
4190  *	Check if a receive handler is already registered for a given device.
4191  *	Return true if there is one.
4192  *
4193  *	The caller must hold the rtnl_mutex.
4194  */
4195 bool netdev_is_rx_handler_busy(struct net_device *dev)
4196 {
4197 	ASSERT_RTNL();
4198 	return dev && rtnl_dereference(dev->rx_handler);
4199 }
4200 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4201 
4202 /**
4203  *	netdev_rx_handler_register - register receive handler
4204  *	@dev: device to register a handler for
4205  *	@rx_handler: receive handler to register
4206  *	@rx_handler_data: data pointer that is used by rx handler
4207  *
4208  *	Register a receive handler for a device. This handler will then be
4209  *	called from __netif_receive_skb. A negative errno code is returned
4210  *	on a failure.
4211  *
4212  *	The caller must hold the rtnl_mutex.
4213  *
4214  *	For a general description of rx_handler, see enum rx_handler_result.
4215  */
4216 int netdev_rx_handler_register(struct net_device *dev,
4217 			       rx_handler_func_t *rx_handler,
4218 			       void *rx_handler_data)
4219 {
4220 	if (netdev_is_rx_handler_busy(dev))
4221 		return -EBUSY;
4222 
4223 	/* Note: rx_handler_data must be set before rx_handler */
4224 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4225 	rcu_assign_pointer(dev->rx_handler, rx_handler);
4226 
4227 	return 0;
4228 }
4229 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
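/* Illustrative usage (a sketch only; my_port_handle_frame and my_port are
 * hypothetical, but the pattern mirrors how bridge-like drivers attach to a
 * port device while holding RTNL):
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(port_dev, my_port_handle_frame,
 *					 my_port);
 *	if (err)
 *		return err;
 *	...
 *	netdev_rx_handler_unregister(port_dev);		(on detach)
 */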
4230 
4231 /**
4232  *	netdev_rx_handler_unregister - unregister receive handler
4233  *	@dev: device to unregister a handler from
4234  *
4235  *	Unregister a receive handler from a device.
4236  *
4237  *	The caller must hold the rtnl_mutex.
4238  */
4239 void netdev_rx_handler_unregister(struct net_device *dev)
4240 {
4241 
4242 	ASSERT_RTNL();
4243 	RCU_INIT_POINTER(dev->rx_handler, NULL);
4244 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
4245 	 * section has a guarantee to see a non NULL rx_handler_data
4246 	 * as well.
4247 	 */
4248 	synchronize_net();
4249 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4250 }
4251 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4252 
4253 /*
4254  * Limit the use of PFMEMALLOC reserves to those protocols that implement
4255  * the special handling of PFMEMALLOC skbs.
4256  */
4257 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4258 {
4259 	switch (skb->protocol) {
4260 	case htons(ETH_P_ARP):
4261 	case htons(ETH_P_IP):
4262 	case htons(ETH_P_IPV6):
4263 	case htons(ETH_P_8021Q):
4264 	case htons(ETH_P_8021AD):
4265 		return true;
4266 	default:
4267 		return false;
4268 	}
4269 }
4270 
4271 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4272 			     int *ret, struct net_device *orig_dev)
4273 {
4274 #ifdef CONFIG_NETFILTER_INGRESS
4275 	if (nf_hook_ingress_active(skb)) {
4276 		int ingress_retval;
4277 
4278 		if (*pt_prev) {
4279 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
4280 			*pt_prev = NULL;
4281 		}
4282 
4283 		rcu_read_lock();
4284 		ingress_retval = nf_hook_ingress(skb);
4285 		rcu_read_unlock();
4286 		return ingress_retval;
4287 	}
4288 #endif /* CONFIG_NETFILTER_INGRESS */
4289 	return 0;
4290 }
4291 
4292 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4293 {
4294 	struct packet_type *ptype, *pt_prev;
4295 	rx_handler_func_t *rx_handler;
4296 	struct net_device *orig_dev;
4297 	bool deliver_exact = false;
4298 	int ret = NET_RX_DROP;
4299 	__be16 type;
4300 
4301 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
4302 
4303 	trace_netif_receive_skb(skb);
4304 
4305 	orig_dev = skb->dev;
4306 
4307 	skb_reset_network_header(skb);
4308 	if (!skb_transport_header_was_set(skb))
4309 		skb_reset_transport_header(skb);
4310 	skb_reset_mac_len(skb);
4311 
4312 	pt_prev = NULL;
4313 
4314 another_round:
4315 	skb->skb_iif = skb->dev->ifindex;
4316 
4317 	__this_cpu_inc(softnet_data.processed);
4318 
4319 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4320 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4321 		skb = skb_vlan_untag(skb);
4322 		if (unlikely(!skb))
4323 			goto out;
4324 	}
4325 
4326 	if (skb_skip_tc_classify(skb))
4327 		goto skip_classify;
4328 
4329 	if (pfmemalloc)
4330 		goto skip_taps;
4331 
4332 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
4333 		if (pt_prev)
4334 			ret = deliver_skb(skb, pt_prev, orig_dev);
4335 		pt_prev = ptype;
4336 	}
4337 
4338 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4339 		if (pt_prev)
4340 			ret = deliver_skb(skb, pt_prev, orig_dev);
4341 		pt_prev = ptype;
4342 	}
4343 
4344 skip_taps:
4345 #ifdef CONFIG_NET_INGRESS
4346 	if (static_key_false(&ingress_needed)) {
4347 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4348 		if (!skb)
4349 			goto out;
4350 
4351 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4352 			goto out;
4353 	}
4354 #endif
4355 	skb_reset_tc(skb);
4356 skip_classify:
4357 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4358 		goto drop;
4359 
4360 	if (skb_vlan_tag_present(skb)) {
4361 		if (pt_prev) {
4362 			ret = deliver_skb(skb, pt_prev, orig_dev);
4363 			pt_prev = NULL;
4364 		}
4365 		if (vlan_do_receive(&skb))
4366 			goto another_round;
4367 		else if (unlikely(!skb))
4368 			goto out;
4369 	}
4370 
4371 	rx_handler = rcu_dereference(skb->dev->rx_handler);
4372 	if (rx_handler) {
4373 		if (pt_prev) {
4374 			ret = deliver_skb(skb, pt_prev, orig_dev);
4375 			pt_prev = NULL;
4376 		}
4377 		switch (rx_handler(&skb)) {
4378 		case RX_HANDLER_CONSUMED:
4379 			ret = NET_RX_SUCCESS;
4380 			goto out;
4381 		case RX_HANDLER_ANOTHER:
4382 			goto another_round;
4383 		case RX_HANDLER_EXACT:
4384 			deliver_exact = true;
4385 		case RX_HANDLER_PASS:
4386 			break;
4387 		default:
4388 			BUG();
4389 		}
4390 	}
4391 
4392 	if (unlikely(skb_vlan_tag_present(skb))) {
4393 		if (skb_vlan_tag_get_id(skb))
4394 			skb->pkt_type = PACKET_OTHERHOST;
4395 		/* Note: we might in the future use prio bits
4396 		 * and set skb->priority like in vlan_do_receive().
4397 		 * For the time being, just ignore the Priority Code Point.
4398 		 */
4399 		skb->vlan_tci = 0;
4400 	}
4401 
4402 	type = skb->protocol;
4403 
4404 	/* deliver only exact match when indicated */
4405 	if (likely(!deliver_exact)) {
4406 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4407 				       &ptype_base[ntohs(type) &
4408 						   PTYPE_HASH_MASK]);
4409 	}
4410 
4411 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4412 			       &orig_dev->ptype_specific);
4413 
4414 	if (unlikely(skb->dev != orig_dev)) {
4415 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4416 				       &skb->dev->ptype_specific);
4417 	}
4418 
4419 	if (pt_prev) {
4420 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
4421 			goto drop;
4422 		else
4423 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4424 	} else {
4425 drop:
4426 		if (!deliver_exact)
4427 			atomic_long_inc(&skb->dev->rx_dropped);
4428 		else
4429 			atomic_long_inc(&skb->dev->rx_nohandler);
4430 		kfree_skb(skb);
4431 		/* Jamal, now you will not be able to escape explaining
4432 		 * to me how you were going to use this. :-)
4433 		 */
4434 		ret = NET_RX_DROP;
4435 	}
4436 
4437 out:
4438 	return ret;
4439 }
4440 
4441 static int __netif_receive_skb(struct sk_buff *skb)
4442 {
4443 	int ret;
4444 
4445 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4446 		unsigned int noreclaim_flag;
4447 
4448 		/*
4449 		 * PFMEMALLOC skbs are special, they should
4450 		 * - be delivered to SOCK_MEMALLOC sockets only
4451 		 * - stay away from userspace
4452 		 * - have bounded memory usage
4453 		 *
4454 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
4455 		 * context down to all allocation sites.
4456 		 */
4457 		noreclaim_flag = memalloc_noreclaim_save();
4458 		ret = __netif_receive_skb_core(skb, true);
4459 		memalloc_noreclaim_restore(noreclaim_flag);
4460 	} else
4461 		ret = __netif_receive_skb_core(skb, false);
4462 
4463 	return ret;
4464 }
4465 
4466 static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
4467 {
4468 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
4469 	struct bpf_prog *new = xdp->prog;
4470 	int ret = 0;
4471 
4472 	switch (xdp->command) {
4473 	case XDP_SETUP_PROG:
4474 		rcu_assign_pointer(dev->xdp_prog, new);
4475 		if (old)
4476 			bpf_prog_put(old);
4477 
4478 		if (old && !new) {
4479 			static_key_slow_dec(&generic_xdp_needed);
4480 		} else if (new && !old) {
4481 			static_key_slow_inc(&generic_xdp_needed);
4482 			dev_disable_lro(dev);
4483 		}
4484 		break;
4485 
4486 	case XDP_QUERY_PROG:
4487 		xdp->prog_attached = !!old;
4488 		xdp->prog_id = old ? old->aux->id : 0;
4489 		break;
4490 
4491 	default:
4492 		ret = -EINVAL;
4493 		break;
4494 	}
4495 
4496 	return ret;
4497 }
4498 
4499 static int netif_receive_skb_internal(struct sk_buff *skb)
4500 {
4501 	int ret;
4502 
4503 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4504 
4505 	if (skb_defer_rx_timestamp(skb))
4506 		return NET_RX_SUCCESS;
4507 
4508 	if (static_key_false(&generic_xdp_needed)) {
4509 		int ret;
4510 
4511 		preempt_disable();
4512 		rcu_read_lock();
4513 		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4514 		rcu_read_unlock();
4515 		preempt_enable();
4516 
4517 		if (ret != XDP_PASS)
4518 			return NET_RX_DROP;
4519 	}
4520 
4521 	rcu_read_lock();
4522 #ifdef CONFIG_RPS
4523 	if (static_key_false(&rps_needed)) {
4524 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4525 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4526 
4527 		if (cpu >= 0) {
4528 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4529 			rcu_read_unlock();
4530 			return ret;
4531 		}
4532 	}
4533 #endif
4534 	ret = __netif_receive_skb(skb);
4535 	rcu_read_unlock();
4536 	return ret;
4537 }
4538 
4539 /**
4540  *	netif_receive_skb - process receive buffer from network
4541  *	@skb: buffer to process
4542  *
4543  *	netif_receive_skb() is the main receive data processing function.
4544  *	It always succeeds. The buffer may be dropped during processing
4545  *	for congestion control or by the protocol layers.
4546  *
4547  *	This function may only be called from softirq context and interrupts
4548  *	should be enabled.
4549  *
4550  *	Return values (usually ignored):
4551  *	NET_RX_SUCCESS: no congestion
4552  *	NET_RX_DROP: packet was dropped
4553  */
4554 int netif_receive_skb(struct sk_buff *skb)
4555 {
4556 	trace_netif_receive_skb_entry(skb);
4557 
4558 	return netif_receive_skb_internal(skb);
4559 }
4560 EXPORT_SYMBOL(netif_receive_skb);
4561 
4562 DEFINE_PER_CPU(struct work_struct, flush_works);
4563 
4564 /* Network device is going away, flush any packets still pending */
4565 static void flush_backlog(struct work_struct *work)
4566 {
4567 	struct sk_buff *skb, *tmp;
4568 	struct softnet_data *sd;
4569 
4570 	local_bh_disable();
4571 	sd = this_cpu_ptr(&softnet_data);
4572 
4573 	local_irq_disable();
4574 	rps_lock(sd);
4575 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4576 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
4577 			__skb_unlink(skb, &sd->input_pkt_queue);
4578 			kfree_skb(skb);
4579 			input_queue_head_incr(sd);
4580 		}
4581 	}
4582 	rps_unlock(sd);
4583 	local_irq_enable();
4584 
4585 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4586 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
4587 			__skb_unlink(skb, &sd->process_queue);
4588 			kfree_skb(skb);
4589 			input_queue_head_incr(sd);
4590 		}
4591 	}
4592 	local_bh_enable();
4593 }
4594 
4595 static void flush_all_backlogs(void)
4596 {
4597 	unsigned int cpu;
4598 
4599 	get_online_cpus();
4600 
4601 	for_each_online_cpu(cpu)
4602 		queue_work_on(cpu, system_highpri_wq,
4603 			      per_cpu_ptr(&flush_works, cpu));
4604 
4605 	for_each_online_cpu(cpu)
4606 		flush_work(per_cpu_ptr(&flush_works, cpu));
4607 
4608 	put_online_cpus();
4609 }
4610 
4611 static int napi_gro_complete(struct sk_buff *skb)
4612 {
4613 	struct packet_offload *ptype;
4614 	__be16 type = skb->protocol;
4615 	struct list_head *head = &offload_base;
4616 	int err = -ENOENT;
4617 
4618 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4619 
4620 	if (NAPI_GRO_CB(skb)->count == 1) {
4621 		skb_shinfo(skb)->gso_size = 0;
4622 		goto out;
4623 	}
4624 
4625 	rcu_read_lock();
4626 	list_for_each_entry_rcu(ptype, head, list) {
4627 		if (ptype->type != type || !ptype->callbacks.gro_complete)
4628 			continue;
4629 
4630 		err = ptype->callbacks.gro_complete(skb, 0);
4631 		break;
4632 	}
4633 	rcu_read_unlock();
4634 
4635 	if (err) {
4636 		WARN_ON(&ptype->list == head);
4637 		kfree_skb(skb);
4638 		return NET_RX_SUCCESS;
4639 	}
4640 
4641 out:
4642 	return netif_receive_skb_internal(skb);
4643 }
4644 
4645 /* napi->gro_list contains packets ordered by age.
4646  * Youngest packets are at the head of it.
4647  * Complete skbs in reverse order to reduce latencies.
4648  */
4649 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4650 {
4651 	struct sk_buff *skb, *prev = NULL;
4652 
4653 	/* scan list and build reverse chain */
4654 	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4655 		skb->prev = prev;
4656 		prev = skb;
4657 	}
4658 
4659 	for (skb = prev; skb; skb = prev) {
4660 		skb->next = NULL;
4661 
4662 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4663 			return;
4664 
4665 		prev = skb->prev;
4666 		napi_gro_complete(skb);
4667 		napi->gro_count--;
4668 	}
4669 
4670 	napi->gro_list = NULL;
4671 }
4672 EXPORT_SYMBOL(napi_gro_flush);
4673 
4674 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4675 {
4676 	struct sk_buff *p;
4677 	unsigned int maclen = skb->dev->hard_header_len;
4678 	u32 hash = skb_get_hash_raw(skb);
4679 
4680 	for (p = napi->gro_list; p; p = p->next) {
4681 		unsigned long diffs;
4682 
4683 		NAPI_GRO_CB(p)->flush = 0;
4684 
4685 		if (hash != skb_get_hash_raw(p)) {
4686 			NAPI_GRO_CB(p)->same_flow = 0;
4687 			continue;
4688 		}
4689 
4690 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4691 		diffs |= p->vlan_tci ^ skb->vlan_tci;
4692 		diffs |= skb_metadata_dst_cmp(p, skb);
4693 		if (maclen == ETH_HLEN)
4694 			diffs |= compare_ether_header(skb_mac_header(p),
4695 						      skb_mac_header(skb));
4696 		else if (!diffs)
4697 			diffs = memcmp(skb_mac_header(p),
4698 				       skb_mac_header(skb),
4699 				       maclen);
4700 		NAPI_GRO_CB(p)->same_flow = !diffs;
4701 	}
4702 }
4703 
4704 static void skb_gro_reset_offset(struct sk_buff *skb)
4705 {
4706 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
4707 	const skb_frag_t *frag0 = &pinfo->frags[0];
4708 
4709 	NAPI_GRO_CB(skb)->data_offset = 0;
4710 	NAPI_GRO_CB(skb)->frag0 = NULL;
4711 	NAPI_GRO_CB(skb)->frag0_len = 0;
4712 
4713 	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4714 	    pinfo->nr_frags &&
4715 	    !PageHighMem(skb_frag_page(frag0))) {
4716 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4717 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4718 						    skb_frag_size(frag0),
4719 						    skb->end - skb->tail);
4720 	}
4721 }
4722 
4723 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4724 {
4725 	struct skb_shared_info *pinfo = skb_shinfo(skb);
4726 
4727 	BUG_ON(skb->end - skb->tail < grow);
4728 
4729 	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4730 
4731 	skb->data_len -= grow;
4732 	skb->tail += grow;
4733 
4734 	pinfo->frags[0].page_offset += grow;
4735 	skb_frag_size_sub(&pinfo->frags[0], grow);
4736 
4737 	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4738 		skb_frag_unref(skb, 0);
4739 		memmove(pinfo->frags, pinfo->frags + 1,
4740 			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
4741 	}
4742 }
4743 
4744 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4745 {
4746 	struct sk_buff **pp = NULL;
4747 	struct packet_offload *ptype;
4748 	__be16 type = skb->protocol;
4749 	struct list_head *head = &offload_base;
4750 	int same_flow;
4751 	enum gro_result ret;
4752 	int grow;
4753 
4754 	if (netif_elide_gro(skb->dev))
4755 		goto normal;
4756 
4757 	gro_list_prepare(napi, skb);
4758 
4759 	rcu_read_lock();
4760 	list_for_each_entry_rcu(ptype, head, list) {
4761 		if (ptype->type != type || !ptype->callbacks.gro_receive)
4762 			continue;
4763 
4764 		skb_set_network_header(skb, skb_gro_offset(skb));
4765 		skb_reset_mac_len(skb);
4766 		NAPI_GRO_CB(skb)->same_flow = 0;
4767 		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
4768 		NAPI_GRO_CB(skb)->free = 0;
4769 		NAPI_GRO_CB(skb)->encap_mark = 0;
4770 		NAPI_GRO_CB(skb)->recursion_counter = 0;
4771 		NAPI_GRO_CB(skb)->is_fou = 0;
4772 		NAPI_GRO_CB(skb)->is_atomic = 1;
4773 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4774 
4775 		/* Setup for GRO checksum validation */
4776 		switch (skb->ip_summed) {
4777 		case CHECKSUM_COMPLETE:
4778 			NAPI_GRO_CB(skb)->csum = skb->csum;
4779 			NAPI_GRO_CB(skb)->csum_valid = 1;
4780 			NAPI_GRO_CB(skb)->csum_cnt = 0;
4781 			break;
4782 		case CHECKSUM_UNNECESSARY:
4783 			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4784 			NAPI_GRO_CB(skb)->csum_valid = 0;
4785 			break;
4786 		default:
4787 			NAPI_GRO_CB(skb)->csum_cnt = 0;
4788 			NAPI_GRO_CB(skb)->csum_valid = 0;
4789 		}
4790 
4791 		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4792 		break;
4793 	}
4794 	rcu_read_unlock();
4795 
4796 	if (&ptype->list == head)
4797 		goto normal;
4798 
4799 	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4800 		ret = GRO_CONSUMED;
4801 		goto ok;
4802 	}
4803 
4804 	same_flow = NAPI_GRO_CB(skb)->same_flow;
4805 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4806 
4807 	if (pp) {
4808 		struct sk_buff *nskb = *pp;
4809 
4810 		*pp = nskb->next;
4811 		nskb->next = NULL;
4812 		napi_gro_complete(nskb);
4813 		napi->gro_count--;
4814 	}
4815 
4816 	if (same_flow)
4817 		goto ok;
4818 
4819 	if (NAPI_GRO_CB(skb)->flush)
4820 		goto normal;
4821 
4822 	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4823 		struct sk_buff *nskb = napi->gro_list;
4824 
4825 		/* locate the end of the list to select the 'oldest' flow */
4826 		while (nskb->next) {
4827 			pp = &nskb->next;
4828 			nskb = *pp;
4829 		}
4830 		*pp = NULL;
4831 		nskb->next = NULL;
4832 		napi_gro_complete(nskb);
4833 	} else {
4834 		napi->gro_count++;
4835 	}
4836 	NAPI_GRO_CB(skb)->count = 1;
4837 	NAPI_GRO_CB(skb)->age = jiffies;
4838 	NAPI_GRO_CB(skb)->last = skb;
4839 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4840 	skb->next = napi->gro_list;
4841 	napi->gro_list = skb;
4842 	ret = GRO_HELD;
4843 
4844 pull:
4845 	grow = skb_gro_offset(skb) - skb_headlen(skb);
4846 	if (grow > 0)
4847 		gro_pull_from_frag0(skb, grow);
4848 ok:
4849 	return ret;
4850 
4851 normal:
4852 	ret = GRO_NORMAL;
4853 	goto pull;
4854 }
4855 
4856 struct packet_offload *gro_find_receive_by_type(__be16 type)
4857 {
4858 	struct list_head *offload_head = &offload_base;
4859 	struct packet_offload *ptype;
4860 
4861 	list_for_each_entry_rcu(ptype, offload_head, list) {
4862 		if (ptype->type != type || !ptype->callbacks.gro_receive)
4863 			continue;
4864 		return ptype;
4865 	}
4866 	return NULL;
4867 }
4868 EXPORT_SYMBOL(gro_find_receive_by_type);
4869 
4870 struct packet_offload *gro_find_complete_by_type(__be16 type)
4871 {
4872 	struct list_head *offload_head = &offload_base;
4873 	struct packet_offload *ptype;
4874 
4875 	list_for_each_entry_rcu(ptype, offload_head, list) {
4876 		if (ptype->type != type || !ptype->callbacks.gro_complete)
4877 			continue;
4878 		return ptype;
4879 	}
4880 	return NULL;
4881 }
4882 EXPORT_SYMBOL(gro_find_complete_by_type);
4883 
4884 static void napi_skb_free_stolen_head(struct sk_buff *skb)
4885 {
4886 	skb_dst_drop(skb);
4887 	secpath_reset(skb);
4888 	kmem_cache_free(skbuff_head_cache, skb);
4889 }
4890 
4891 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4892 {
4893 	switch (ret) {
4894 	case GRO_NORMAL:
4895 		if (netif_receive_skb_internal(skb))
4896 			ret = GRO_DROP;
4897 		break;
4898 
4899 	case GRO_DROP:
4900 		kfree_skb(skb);
4901 		break;
4902 
4903 	case GRO_MERGED_FREE:
4904 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4905 			napi_skb_free_stolen_head(skb);
4906 		else
4907 			__kfree_skb(skb);
4908 		break;
4909 
4910 	case GRO_HELD:
4911 	case GRO_MERGED:
4912 	case GRO_CONSUMED:
4913 		break;
4914 	}
4915 
4916 	return ret;
4917 }
4918 
4919 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4920 {
4921 	skb_mark_napi_id(skb, napi);
4922 	trace_napi_gro_receive_entry(skb);
4923 
4924 	skb_gro_reset_offset(skb);
4925 
4926 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4927 }
4928 EXPORT_SYMBOL(napi_gro_receive);
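/* Typical driver RX completion usage (sketch; my_build_skb() and rx_desc
 * are hypothetical driver-specific pieces):
 *
 *	skb = my_build_skb(priv, rx_desc);
 *	if (unlikely(!skb))
 *		break;
 *	skb->protocol = eth_type_trans(skb, priv->netdev);
 *	napi_gro_receive(&priv->napi, skb);
 */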
4929 
4930 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4931 {
4932 	if (unlikely(skb->pfmemalloc)) {
4933 		consume_skb(skb);
4934 		return;
4935 	}
4936 	__skb_pull(skb, skb_headlen(skb));
4937 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
4938 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4939 	skb->vlan_tci = 0;
4940 	skb->dev = napi->dev;
4941 	skb->skb_iif = 0;
4942 	skb->encapsulation = 0;
4943 	skb_shinfo(skb)->gso_type = 0;
4944 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4945 	secpath_reset(skb);
4946 
4947 	napi->skb = skb;
4948 }
4949 
4950 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4951 {
4952 	struct sk_buff *skb = napi->skb;
4953 
4954 	if (!skb) {
4955 		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4956 		if (skb) {
4957 			napi->skb = skb;
4958 			skb_mark_napi_id(skb, napi);
4959 		}
4960 	}
4961 	return skb;
4962 }
4963 EXPORT_SYMBOL(napi_get_frags);
4964 
4965 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4966 				      struct sk_buff *skb,
4967 				      gro_result_t ret)
4968 {
4969 	switch (ret) {
4970 	case GRO_NORMAL:
4971 	case GRO_HELD:
4972 		__skb_push(skb, ETH_HLEN);
4973 		skb->protocol = eth_type_trans(skb, skb->dev);
4974 		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4975 			ret = GRO_DROP;
4976 		break;
4977 
4978 	case GRO_DROP:
4979 		napi_reuse_skb(napi, skb);
4980 		break;
4981 
4982 	case GRO_MERGED_FREE:
4983 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4984 			napi_skb_free_stolen_head(skb);
4985 		else
4986 			napi_reuse_skb(napi, skb);
4987 		break;
4988 
4989 	case GRO_MERGED:
4990 	case GRO_CONSUMED:
4991 		break;
4992 	}
4993 
4994 	return ret;
4995 }
4996 
4997 /* Upper GRO stack assumes the network header starts at gro_offset=0.
4998  * Drivers could call both napi_gro_frags() and napi_gro_receive().
4999  * We copy the ethernet header into skb->data to have a common layout.
5000  */
5001 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
5002 {
5003 	struct sk_buff *skb = napi->skb;
5004 	const struct ethhdr *eth;
5005 	unsigned int hlen = sizeof(*eth);
5006 
5007 	napi->skb = NULL;
5008 
5009 	skb_reset_mac_header(skb);
5010 	skb_gro_reset_offset(skb);
5011 
5012 	eth = skb_gro_header_fast(skb, 0);
5013 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
5014 		eth = skb_gro_header_slow(skb, hlen, 0);
5015 		if (unlikely(!eth)) {
5016 			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5017 					     __func__, napi->dev->name);
5018 			napi_reuse_skb(napi, skb);
5019 			return NULL;
5020 		}
5021 	} else {
5022 		gro_pull_from_frag0(skb, hlen);
5023 		NAPI_GRO_CB(skb)->frag0 += hlen;
5024 		NAPI_GRO_CB(skb)->frag0_len -= hlen;
5025 	}
5026 	__skb_pull(skb, hlen);
5027 
5028 	/*
5029 	 * This works because the only protocols we care about don't require
5030 	 * special handling.
5031 	 * We'll fix it up properly in napi_frags_finish()
5032 	 */
5033 	skb->protocol = eth->h_proto;
5034 
5035 	return skb;
5036 }
5037 
5038 gro_result_t napi_gro_frags(struct napi_struct *napi)
5039 {
5040 	struct sk_buff *skb = napi_frags_skb(napi);
5041 
5042 	if (!skb)
5043 		return GRO_DROP;
5044 
5045 	trace_napi_gro_frags_entry(skb);
5046 
5047 	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5048 }
5049 EXPORT_SYMBOL(napi_gro_frags);
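/* Frag-based receive sketch (page/offset/len/truesize would come from the
 * driver's RX descriptor; only napi_get_frags(), skb_add_rx_frag() and
 * napi_gro_frags() are real APIs here):
 *
 *	skb = napi_get_frags(&priv->napi);
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 *			len, truesize);
 *	napi_gro_frags(&priv->napi);
 */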
5050 
5051 /* Compute the checksum from gro_offset and return the folded value
5052  * after adding in any pseudo checksum.
5053  */
5054 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5055 {
5056 	__wsum wsum;
5057 	__sum16 sum;
5058 
5059 	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5060 
5061 	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5062 	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5063 	if (likely(!sum)) {
5064 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5065 		    !skb->csum_complete_sw)
5066 			netdev_rx_csum_fault(skb->dev);
5067 	}
5068 
5069 	NAPI_GRO_CB(skb)->csum = wsum;
5070 	NAPI_GRO_CB(skb)->csum_valid = 1;
5071 
5072 	return sum;
5073 }
5074 EXPORT_SYMBOL(__skb_gro_checksum_complete);
5075 
5076 static void net_rps_send_ipi(struct softnet_data *remsd)
5077 {
5078 #ifdef CONFIG_RPS
5079 	while (remsd) {
5080 		struct softnet_data *next = remsd->rps_ipi_next;
5081 
5082 		if (cpu_online(remsd->cpu))
5083 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
5084 		remsd = next;
5085 	}
5086 #endif
5087 }
5088 
5089 /*
5090  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5091  * Note: called with local irq disabled, but exits with local irq enabled.
5092  */
5093 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5094 {
5095 #ifdef CONFIG_RPS
5096 	struct softnet_data *remsd = sd->rps_ipi_list;
5097 
5098 	if (remsd) {
5099 		sd->rps_ipi_list = NULL;
5100 
5101 		local_irq_enable();
5102 
5103 		/* Send pending IPI's to kick RPS processing on remote cpus. */
5104 		net_rps_send_ipi(remsd);
5105 	} else
5106 #endif
5107 		local_irq_enable();
5108 }
5109 
5110 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5111 {
5112 #ifdef CONFIG_RPS
5113 	return sd->rps_ipi_list != NULL;
5114 #else
5115 	return false;
5116 #endif
5117 }
5118 
5119 static int process_backlog(struct napi_struct *napi, int quota)
5120 {
5121 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5122 	bool again = true;
5123 	int work = 0;
5124 
5125 	/* Check if we have pending IPIs; it's better to send them now
5126 	 * rather than waiting for net_rx_action() to end.
5127 	 */
5128 	if (sd_has_rps_ipi_waiting(sd)) {
5129 		local_irq_disable();
5130 		net_rps_action_and_irq_enable(sd);
5131 	}
5132 
5133 	napi->weight = dev_rx_weight;
5134 	while (again) {
5135 		struct sk_buff *skb;
5136 
5137 		while ((skb = __skb_dequeue(&sd->process_queue))) {
5138 			rcu_read_lock();
5139 			__netif_receive_skb(skb);
5140 			rcu_read_unlock();
5141 			input_queue_head_incr(sd);
5142 			if (++work >= quota)
5143 				return work;
5144 
5145 		}
5146 
5147 		local_irq_disable();
5148 		rps_lock(sd);
5149 		if (skb_queue_empty(&sd->input_pkt_queue)) {
5150 			/*
5151 			 * Inline a custom version of __napi_complete().
5152 			 * Only the current cpu owns and manipulates this napi,
5153 			 * and NAPI_STATE_SCHED is the only possible flag set
5154 			 * on backlog.
5155 			 * We can use a plain write instead of clear_bit(),
5156 			 * and we don't need an smp_mb() memory barrier.
5157 			 */
5158 			napi->state = 0;
5159 			again = false;
5160 		} else {
5161 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
5162 						   &sd->process_queue);
5163 		}
5164 		rps_unlock(sd);
5165 		local_irq_enable();
5166 	}
5167 
5168 	return work;
5169 }
5170 
5171 /**
5172  * __napi_schedule - schedule for receive
5173  * @n: entry to schedule
5174  *
5175  * The entry's receive function will be scheduled to run.
5176  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5177  */
5178 void __napi_schedule(struct napi_struct *n)
5179 {
5180 	unsigned long flags;
5181 
5182 	local_irq_save(flags);
5183 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
5184 	local_irq_restore(flags);
5185 }
5186 EXPORT_SYMBOL(__napi_schedule);
5187 
5188 /**
5189  *	napi_schedule_prep - check if napi can be scheduled
5190  *	@n: napi context
5191  *
5192  * Test if NAPI routine is already running, and if not mark
5193  * it as running.  This is used as a condition variable to
5194  * ensure that only one NAPI poll instance runs.  We also make
5195  * sure there is no pending NAPI disable.
5196  */
5197 bool napi_schedule_prep(struct napi_struct *n)
5198 {
5199 	unsigned long val, new;
5200 
5201 	do {
5202 		val = READ_ONCE(n->state);
5203 		if (unlikely(val & NAPIF_STATE_DISABLE))
5204 			return false;
5205 		new = val | NAPIF_STATE_SCHED;
5206 
5207 		/* Sets STATE_MISSED bit if STATE_SCHED was already set
5208 		 * This was suggested by Alexander Duyck, as compiler
5209 		 * emits better code than :
5210 		 * if (val & NAPIF_STATE_SCHED)
5211 		 *     new |= NAPIF_STATE_MISSED;
5212 		 */
5213 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5214 						   NAPIF_STATE_MISSED;
5215 	} while (cmpxchg(&n->state, val, new) != val);
5216 
5217 	return !(val & NAPIF_STATE_SCHED);
5218 }
5219 EXPORT_SYMBOL(napi_schedule_prep);
5220 
5221 /**
5222  * __napi_schedule_irqoff - schedule for receive
5223  * @n: entry to schedule
5224  *
5225  * Variant of __napi_schedule() assuming hard irqs are masked
5226  */
5227 void __napi_schedule_irqoff(struct napi_struct *n)
5228 {
5229 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
5230 }
5231 EXPORT_SYMBOL(__napi_schedule_irqoff);
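/* Typical hard interrupt handler usage (sketch; my_disable_device_irqs()
 * is a hypothetical device-specific helper):
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		my_disable_device_irqs(priv);
 *		__napi_schedule_irqoff(&priv->napi);
 *	}
 *	return IRQ_HANDLED;
 */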
5232 
5233 bool napi_complete_done(struct napi_struct *n, int work_done)
5234 {
5235 	unsigned long flags, val, new;
5236 
5237 	/*
5238 	 * 1) Don't let napi dequeue from the cpu poll list
5239 	 *    just in case it's running on a different cpu.
5240 	 * 2) If we are busy polling, do nothing here, we have
5241 	 *    the guarantee we will be called later.
5242 	 */
5243 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5244 				 NAPIF_STATE_IN_BUSY_POLL)))
5245 		return false;
5246 
5247 	if (n->gro_list) {
5248 		unsigned long timeout = 0;
5249 
5250 		if (work_done)
5251 			timeout = n->dev->gro_flush_timeout;
5252 
5253 		if (timeout)
5254 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
5255 				      HRTIMER_MODE_REL_PINNED);
5256 		else
5257 			napi_gro_flush(n, false);
5258 	}
5259 	if (unlikely(!list_empty(&n->poll_list))) {
5260 		/* If n->poll_list is not empty, we need to mask irqs */
5261 		local_irq_save(flags);
5262 		list_del_init(&n->poll_list);
5263 		local_irq_restore(flags);
5264 	}
5265 
5266 	do {
5267 		val = READ_ONCE(n->state);
5268 
5269 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5270 
5271 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5272 
5273 		/* If STATE_MISSED was set, leave STATE_SCHED set,
5274 		 * because we will call napi->poll() one more time.
5275 		 * This C code was suggested by Alexander Duyck to help gcc.
5276 		 */
5277 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5278 						    NAPIF_STATE_SCHED;
5279 	} while (cmpxchg(&n->state, val, new) != val);
5280 
5281 	if (unlikely(val & NAPIF_STATE_MISSED)) {
5282 		__napi_schedule(n);
5283 		return false;
5284 	}
5285 
5286 	return true;
5287 }
5288 EXPORT_SYMBOL(napi_complete_done);
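/* Typical ->poll() usage (sketch; struct my_priv, my_clean_rx() and
 * my_enable_device_irqs() are hypothetical):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_clean_rx(priv, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			my_enable_device_irqs(priv);
 *		return work_done;
 *	}
 */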
5289 
5290 /* must be called under rcu_read_lock(), as we dont take a reference */
5291 static struct napi_struct *napi_by_id(unsigned int napi_id)
5292 {
5293 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5294 	struct napi_struct *napi;
5295 
5296 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5297 		if (napi->napi_id == napi_id)
5298 			return napi;
5299 
5300 	return NULL;
5301 }
5302 
5303 #if defined(CONFIG_NET_RX_BUSY_POLL)
5304 
5305 #define BUSY_POLL_BUDGET 8
5306 
5307 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5308 {
5309 	int rc;
5310 
5311 	/* Busy polling means there is a high chance device driver hard irq
5312 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5313 	 * set in napi_schedule_prep().
5314 	 * Since we are about to call napi->poll() once more, we can safely
5315 	 * clear NAPI_STATE_MISSED.
5316 	 *
5317 	 * Note: x86 could use a single "lock and ..." instruction
5318 	 * to perform these two clear_bit()
5319 	 */
5320 	clear_bit(NAPI_STATE_MISSED, &napi->state);
5321 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5322 
5323 	local_bh_disable();
5324 
5325 	/* All we really want here is to re-enable device interrupts.
5326 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5327 	 */
5328 	rc = napi->poll(napi, BUSY_POLL_BUDGET);
5329 	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
5330 	netpoll_poll_unlock(have_poll_lock);
5331 	if (rc == BUSY_POLL_BUDGET)
5332 		__napi_schedule(napi);
5333 	local_bh_enable();
5334 }
5335 
5336 void napi_busy_loop(unsigned int napi_id,
5337 		    bool (*loop_end)(void *, unsigned long),
5338 		    void *loop_end_arg)
5339 {
5340 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
5341 	int (*napi_poll)(struct napi_struct *napi, int budget);
5342 	void *have_poll_lock = NULL;
5343 	struct napi_struct *napi;
5344 
5345 restart:
5346 	napi_poll = NULL;
5347 
5348 	rcu_read_lock();
5349 
5350 	napi = napi_by_id(napi_id);
5351 	if (!napi)
5352 		goto out;
5353 
5354 	preempt_disable();
5355 	for (;;) {
5356 		int work = 0;
5357 
5358 		local_bh_disable();
5359 		if (!napi_poll) {
5360 			unsigned long val = READ_ONCE(napi->state);
5361 
5362 			/* If multiple threads are competing for this napi,
5363 			 * we avoid dirtying napi->state as much as we can.
5364 			 */
5365 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5366 				   NAPIF_STATE_IN_BUSY_POLL))
5367 				goto count;
5368 			if (cmpxchg(&napi->state, val,
5369 				    val | NAPIF_STATE_IN_BUSY_POLL |
5370 					  NAPIF_STATE_SCHED) != val)
5371 				goto count;
5372 			have_poll_lock = netpoll_poll_lock(napi);
5373 			napi_poll = napi->poll;
5374 		}
5375 		work = napi_poll(napi, BUSY_POLL_BUDGET);
5376 		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
5377 count:
5378 		if (work > 0)
5379 			__NET_ADD_STATS(dev_net(napi->dev),
5380 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
5381 		local_bh_enable();
5382 
5383 		if (!loop_end || loop_end(loop_end_arg, start_time))
5384 			break;
5385 
5386 		if (unlikely(need_resched())) {
5387 			if (napi_poll)
5388 				busy_poll_stop(napi, have_poll_lock);
5389 			preempt_enable();
5390 			rcu_read_unlock();
5391 			cond_resched();
5392 			if (loop_end(loop_end_arg, start_time))
5393 				return;
5394 			goto restart;
5395 		}
5396 		cpu_relax();
5397 	}
5398 	if (napi_poll)
5399 		busy_poll_stop(napi, have_poll_lock);
5400 	preempt_enable();
5401 out:
5402 	rcu_read_unlock();
5403 }
5404 EXPORT_SYMBOL(napi_busy_loop);
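/* Illustrative caller (sketch; my_loop_end and my_arg are hypothetical;
 * the predicate returns true once the caller wants to stop, e.g. on
 * timeout or when data has arrived):
 *
 *	napi_busy_loop(napi_id, my_loop_end, my_arg);
 *
 * Passing a NULL loop_end makes the loop poll only once, as the break
 * condition above shows.
 */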
5405 
5406 #endif /* CONFIG_NET_RX_BUSY_POLL */
5407 
5408 static void napi_hash_add(struct napi_struct *napi)
5409 {
5410 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5411 	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
5412 		return;
5413 
5414 	spin_lock(&napi_hash_lock);
5415 
5416 	/* 0..NR_CPUS range is reserved for sender_cpu use */
5417 	do {
5418 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5419 			napi_gen_id = MIN_NAPI_ID;
5420 	} while (napi_by_id(napi_gen_id));
5421 	napi->napi_id = napi_gen_id;
5422 
5423 	hlist_add_head_rcu(&napi->napi_hash_node,
5424 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
5425 
5426 	spin_unlock(&napi_hash_lock);
5427 }
5428 
5429 /* Warning: the caller is responsible for making sure an RCU grace period
5430  * is respected before freeing the memory containing @napi.
5431  */
5432 bool napi_hash_del(struct napi_struct *napi)
5433 {
5434 	bool rcu_sync_needed = false;
5435 
5436 	spin_lock(&napi_hash_lock);
5437 
5438 	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5439 		rcu_sync_needed = true;
5440 		hlist_del_rcu(&napi->napi_hash_node);
5441 	}
5442 	spin_unlock(&napi_hash_lock);
5443 	return rcu_sync_needed;
5444 }
5445 EXPORT_SYMBOL_GPL(napi_hash_del);
5446 
5447 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5448 {
5449 	struct napi_struct *napi;
5450 
5451 	napi = container_of(timer, struct napi_struct, timer);
5452 
5453 	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
5454 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5455 	 */
5456 	if (napi->gro_list && !napi_disable_pending(napi) &&
5457 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5458 		__napi_schedule_irqoff(napi);
5459 
5460 	return HRTIMER_NORESTART;
5461 }
5462 
5463 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5464 		    int (*poll)(struct napi_struct *, int), int weight)
5465 {
5466 	INIT_LIST_HEAD(&napi->poll_list);
5467 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5468 	napi->timer.function = napi_watchdog;
5469 	napi->gro_count = 0;
5470 	napi->gro_list = NULL;
5471 	napi->skb = NULL;
5472 	napi->poll = poll;
5473 	if (weight > NAPI_POLL_WEIGHT)
5474 		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5475 			    weight, dev->name);
5476 	napi->weight = weight;
5477 	list_add(&napi->dev_list, &dev->napi_list);
5478 	napi->dev = dev;
5479 #ifdef CONFIG_NETPOLL
5480 	napi->poll_owner = -1;
5481 #endif
5482 	set_bit(NAPI_STATE_SCHED, &napi->state);
5483 	napi_hash_add(napi);
5484 }
5485 EXPORT_SYMBOL(netif_napi_add);
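/* Typical driver setup sketch (my_poll is the driver's poll callback;
 * napi_enable() is called separately once the device is brought up):
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);
 */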
5486 
5487 void napi_disable(struct napi_struct *n)
5488 {
5489 	might_sleep();
5490 	set_bit(NAPI_STATE_DISABLE, &n->state);
5491 
5492 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5493 		msleep(1);
5494 	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5495 		msleep(1);
5496 
5497 	hrtimer_cancel(&n->timer);
5498 
5499 	clear_bit(NAPI_STATE_DISABLE, &n->state);
5500 }
5501 EXPORT_SYMBOL(napi_disable);
5502 
5503 /* Must be called in process context */
5504 void netif_napi_del(struct napi_struct *napi)
5505 {
5506 	might_sleep();
5507 	if (napi_hash_del(napi))
5508 		synchronize_net();
5509 	list_del_init(&napi->dev_list);
5510 	napi_free_frags(napi);
5511 
5512 	kfree_skb_list(napi->gro_list);
5513 	napi->gro_list = NULL;
5514 	napi->gro_count = 0;
5515 }
5516 EXPORT_SYMBOL(netif_napi_del);
5517 
5518 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5519 {
5520 	void *have;
5521 	int work, weight;
5522 
5523 	list_del_init(&n->poll_list);
5524 
5525 	have = netpoll_poll_lock(n);
5526 
5527 	weight = n->weight;
5528 
5529 	/* This NAPI_STATE_SCHED test is for avoiding a race
5530 	 * with netpoll's poll_napi().  Only the entity which
5531 	 * obtains the lock and sees NAPI_STATE_SCHED set will
5532 	 * actually make the ->poll() call.  Therefore we avoid
5533 	 * accidentally calling ->poll() when NAPI is not scheduled.
5534 	 */
5535 	work = 0;
5536 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5537 		work = n->poll(n, weight);
5538 		trace_napi_poll(n, work, weight);
5539 	}
5540 
5541 	WARN_ON_ONCE(work > weight);
5542 
5543 	if (likely(work < weight))
5544 		goto out_unlock;
5545 
5546 	/* Drivers must not modify the NAPI state if they
5547 	 * consume the entire weight.  In such cases this code
5548 	 * still "owns" the NAPI instance and therefore can
5549 	 * move the instance around on the list at-will.
5550 	 */
5551 	if (unlikely(napi_disable_pending(n))) {
5552 		napi_complete(n);
5553 		goto out_unlock;
5554 	}
5555 
5556 	if (n->gro_list) {
5557 		/* flush too old packets
5558 		 * If HZ < 1000, flush all packets.
5559 		 */
5560 		napi_gro_flush(n, HZ >= 1000);
5561 	}
5562 
5563 	/* Some drivers may have called napi_schedule
5564 	 * prior to exhausting their budget.
5565 	 */
5566 	if (unlikely(!list_empty(&n->poll_list))) {
5567 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5568 			     n->dev ? n->dev->name : "backlog");
5569 		goto out_unlock;
5570 	}
5571 
5572 	list_add_tail(&n->poll_list, repoll);
5573 
5574 out_unlock:
5575 	netpoll_poll_unlock(have);
5576 
5577 	return work;
5578 }
5579 
5580 static __latent_entropy void net_rx_action(struct softirq_action *h)
5581 {
5582 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5583 	unsigned long time_limit = jiffies +
5584 		usecs_to_jiffies(netdev_budget_usecs);
5585 	int budget = netdev_budget;
5586 	LIST_HEAD(list);
5587 	LIST_HEAD(repoll);
5588 
5589 	local_irq_disable();
5590 	list_splice_init(&sd->poll_list, &list);
5591 	local_irq_enable();
5592 
5593 	for (;;) {
5594 		struct napi_struct *n;
5595 
5596 		if (list_empty(&list)) {
5597 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5598 				goto out;
5599 			break;
5600 		}
5601 
5602 		n = list_first_entry(&list, struct napi_struct, poll_list);
5603 		budget -= napi_poll(n, &repoll);
5604 
5605 		/* If softirq window is exhausted then punt.
5606 		 * Allow this to run for 2 jiffies, which will allow
5607 		 * an average latency of 1.5/HZ.
5608 		 */
5609 		if (unlikely(budget <= 0 ||
5610 			     time_after_eq(jiffies, time_limit))) {
5611 			sd->time_squeeze++;
5612 			break;
5613 		}
5614 	}
5615 
5616 	local_irq_disable();
5617 
5618 	list_splice_tail_init(&sd->poll_list, &list);
5619 	list_splice_tail(&repoll, &list);
5620 	list_splice(&list, &sd->poll_list);
5621 	if (!list_empty(&sd->poll_list))
5622 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
5623 
5624 	net_rps_action_and_irq_enable(sd);
5625 out:
5626 	__kfree_skb_flush();
5627 }
5628 
5629 struct netdev_adjacent {
5630 	struct net_device *dev;
5631 
5632 	/* upper master flag; there can only be one master device per list */
5633 	bool master;
5634 
5635 	/* counter for the number of times this device was added to us */
5636 	u16 ref_nr;
5637 
5638 	/* private field for the users */
5639 	void *private;
5640 
5641 	struct list_head list;
5642 	struct rcu_head rcu;
5643 };
5644 
5645 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
5646 						 struct list_head *adj_list)
5647 {
5648 	struct netdev_adjacent *adj;
5649 
5650 	list_for_each_entry(adj, adj_list, list) {
5651 		if (adj->dev == adj_dev)
5652 			return adj;
5653 	}
5654 	return NULL;
5655 }
5656 
5657 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
5658 {
5659 	struct net_device *dev = data;
5660 
5661 	return upper_dev == dev;
5662 }
5663 
5664 /**
5665  * netdev_has_upper_dev - Check if device is linked to an upper device
5666  * @dev: device
5667  * @upper_dev: upper device to check
5668  *
5669  * Find out if a device is linked to the specified upper device and return
5670  * true in case it is. Note that this checks only the immediate upper device,
5671  * not through a complete stack of devices. The caller must hold the RTNL lock.
5672  */
5673 bool netdev_has_upper_dev(struct net_device *dev,
5674 			  struct net_device *upper_dev)
5675 {
5676 	ASSERT_RTNL();
5677 
5678 	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5679 					     upper_dev);
5680 }
5681 EXPORT_SYMBOL(netdev_has_upper_dev);
5682 
5683 /**
5684  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
5685  * @dev: device
5686  * @upper_dev: upper device to check
5687  *
5688  * Find out if a device is linked to the specified upper device and return
5689  * true in case it is. Note that this checks the entire upper device chain.
5690  * The caller must hold the RCU read lock.
5691  */
5692 
5693 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5694 				  struct net_device *upper_dev)
5695 {
5696 	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5697 					       upper_dev);
5698 }
5699 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5700 
5701 /**
5702  * netdev_has_any_upper_dev - Check if device is linked to some device
5703  * @dev: device
5704  *
5705  * Find out if a device is linked to an upper device and return true in case
5706  * it is. The caller must hold the RTNL lock.
5707  */
5708 bool netdev_has_any_upper_dev(struct net_device *dev)
5709 {
5710 	ASSERT_RTNL();
5711 
5712 	return !list_empty(&dev->adj_list.upper);
5713 }
5714 EXPORT_SYMBOL(netdev_has_any_upper_dev);
5715 
5716 /**
5717  * netdev_master_upper_dev_get - Get master upper device
5718  * @dev: device
5719  *
5720  * Find a master upper device and return pointer to it or NULL in case
5721  * it's not there. The caller must hold the RTNL lock.
5722  */
5723 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5724 {
5725 	struct netdev_adjacent *upper;
5726 
5727 	ASSERT_RTNL();
5728 
5729 	if (list_empty(&dev->adj_list.upper))
5730 		return NULL;
5731 
5732 	upper = list_first_entry(&dev->adj_list.upper,
5733 				 struct netdev_adjacent, list);
5734 	if (likely(upper->master))
5735 		return upper->dev;
5736 	return NULL;
5737 }
5738 EXPORT_SYMBOL(netdev_master_upper_dev_get);
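
/* Example (editor's illustrative sketch, not part of the original file):
 * a caller holding the RTNL lock can use netdev_master_upper_dev_get()
 * to find out whether @dev is enslaved and to whom.  The function name
 * "example_print_master" is hypothetical.
 *
 *	static void example_print_master(struct net_device *dev)
 *	{
 *		struct net_device *master;
 *
 *		ASSERT_RTNL();
 *		master = netdev_master_upper_dev_get(dev);
 *		if (master)
 *			pr_info("%s is enslaved to %s\n",
 *				dev->name, master->name);
 *	}
 */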
5739 
5740 /**
5741  * netdev_has_any_lower_dev - Check if device is linked to some device
5742  * @dev: device
5743  *
5744  * Find out if a device is linked to a lower device and return true in case
5745  * it is. The caller must hold the RTNL lock.
5746  */
5747 static bool netdev_has_any_lower_dev(struct net_device *dev)
5748 {
5749 	ASSERT_RTNL();
5750 
5751 	return !list_empty(&dev->adj_list.lower);
5752 }
5753 
5754 void *netdev_adjacent_get_private(struct list_head *adj_list)
5755 {
5756 	struct netdev_adjacent *adj;
5757 
5758 	adj = list_entry(adj_list, struct netdev_adjacent, list);
5759 
5760 	return adj->private;
5761 }
5762 EXPORT_SYMBOL(netdev_adjacent_get_private);
5763 
5764 /**
5765  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5766  * @dev: device
5767  * @iter: list_head ** of the current position
5768  *
5769  * Gets the next device from the dev's upper list, starting from iter
5770  * position. The caller must hold RCU read lock.
5771  */
5772 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5773 						 struct list_head **iter)
5774 {
5775 	struct netdev_adjacent *upper;
5776 
5777 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5778 
5779 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5780 
5781 	if (&upper->list == &dev->adj_list.upper)
5782 		return NULL;
5783 
5784 	*iter = &upper->list;
5785 
5786 	return upper->dev;
5787 }
5788 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5789 
5790 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
5791 						    struct list_head **iter)
5792 {
5793 	struct netdev_adjacent *upper;
5794 
5795 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5796 
5797 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5798 
5799 	if (&upper->list == &dev->adj_list.upper)
5800 		return NULL;
5801 
5802 	*iter = &upper->list;
5803 
5804 	return upper->dev;
5805 }
5806 
5807 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5808 				  int (*fn)(struct net_device *dev,
5809 					    void *data),
5810 				  void *data)
5811 {
5812 	struct net_device *udev;
5813 	struct list_head *iter;
5814 	int ret;
5815 
5816 	for (iter = &dev->adj_list.upper,
5817 	     udev = netdev_next_upper_dev_rcu(dev, &iter);
5818 	     udev;
5819 	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
5820 		/* first is the upper device itself */
5821 		ret = fn(udev, data);
5822 		if (ret)
5823 			return ret;
5824 
5825 		/* then look at all of its upper devices */
5826 		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
5827 		if (ret)
5828 			return ret;
5829 	}
5830 
5831 	return 0;
5832 }
5833 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
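
/* Example (editor's illustrative sketch, not part of the original file):
 * walking the whole upper tree with a callback.  The callback matches the
 * int (*fn)(struct net_device *, void *) signature used above; returning a
 * non-zero value stops the walk and propagates that value.
 * "example_count_upper" and "count" are hypothetical names.
 *
 *	static int example_count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, example_count_upper, &count);
 *	rcu_read_unlock();
 */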
5834 
5835 /**
5836  * netdev_lower_get_next_private - Get the next ->private from the
5837  *				   lower neighbour list
5838  * @dev: device
5839  * @iter: list_head ** of the current position
5840  *
5841  * Gets the next netdev_adjacent->private from the dev's lower neighbour
5842  * list, starting from iter position. The caller must either hold the
5843  * RTNL lock or its own locking that guarantees that the neighbour lower
5844  * list will remain unchanged.
5845  */
5846 void *netdev_lower_get_next_private(struct net_device *dev,
5847 				    struct list_head **iter)
5848 {
5849 	struct netdev_adjacent *lower;
5850 
5851 	lower = list_entry(*iter, struct netdev_adjacent, list);
5852 
5853 	if (&lower->list == &dev->adj_list.lower)
5854 		return NULL;
5855 
5856 	*iter = lower->list.next;
5857 
5858 	return lower->private;
5859 }
5860 EXPORT_SYMBOL(netdev_lower_get_next_private);
5861 
5862 /**
5863  * netdev_lower_get_next_private_rcu - Get the next ->private from the
5864  *				       lower neighbour list, RCU
5865  *				       variant
5866  * @dev: device
5867  * @iter: list_head ** of the current position
5868  *
5869  * Gets the next netdev_adjacent->private from the dev's lower neighbour
5870  * list, starting from iter position. The caller must hold RCU read lock.
5871  */
5872 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5873 					struct list_head **iter)
5874 {
5875 	struct netdev_adjacent *lower;
5876 
5877 	WARN_ON_ONCE(!rcu_read_lock_held());
5878 
5879 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5880 
5881 	if (&lower->list == &dev->adj_list.lower)
5882 		return NULL;
5883 
5884 	*iter = &lower->list;
5885 
5886 	return lower->private;
5887 }
5888 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5889 
5890 /**
5891  * netdev_lower_get_next - Get the next device from the lower neighbour
5892  *                         list
5893  * @dev: device
5894  * @iter: list_head ** of the current position
5895  *
5896  * Gets the next netdev_adjacent from the dev's lower neighbour
5897  * list, starting from iter position. The caller must hold RTNL lock or
5898  * its own locking that guarantees that the neighbour lower
5899  * list will remain unchanged.
5900  */
5901 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5902 {
5903 	struct netdev_adjacent *lower;
5904 
5905 	lower = list_entry(*iter, struct netdev_adjacent, list);
5906 
5907 	if (&lower->list == &dev->adj_list.lower)
5908 		return NULL;
5909 
5910 	*iter = lower->list.next;
5911 
5912 	return lower->dev;
5913 }
5914 EXPORT_SYMBOL(netdev_lower_get_next);
5915 
5916 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
5917 						struct list_head **iter)
5918 {
5919 	struct netdev_adjacent *lower;
5920 
5921 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5922 
5923 	if (&lower->list == &dev->adj_list.lower)
5924 		return NULL;
5925 
5926 	*iter = &lower->list;
5927 
5928 	return lower->dev;
5929 }
5930 
5931 int netdev_walk_all_lower_dev(struct net_device *dev,
5932 			      int (*fn)(struct net_device *dev,
5933 					void *data),
5934 			      void *data)
5935 {
5936 	struct net_device *ldev;
5937 	struct list_head *iter;
5938 	int ret;
5939 
5940 	for (iter = &dev->adj_list.lower,
5941 	     ldev = netdev_next_lower_dev(dev, &iter);
5942 	     ldev;
5943 	     ldev = netdev_next_lower_dev(dev, &iter)) {
5944 		/* first is the lower device itself */
5945 		ret = fn(ldev, data);
5946 		if (ret)
5947 			return ret;
5948 
5949 		/* then look at all of its lower devices */
5950 		ret = netdev_walk_all_lower_dev(ldev, fn, data);
5951 		if (ret)
5952 			return ret;
5953 	}
5954 
5955 	return 0;
5956 }
5957 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
5958 
5959 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5960 						    struct list_head **iter)
5961 {
5962 	struct netdev_adjacent *lower;
5963 
5964 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5965 	if (&lower->list == &dev->adj_list.lower)
5966 		return NULL;
5967 
5968 	*iter = &lower->list;
5969 
5970 	return lower->dev;
5971 }
5972 
5973 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5974 				  int (*fn)(struct net_device *dev,
5975 					    void *data),
5976 				  void *data)
5977 {
5978 	struct net_device *ldev;
5979 	struct list_head *iter;
5980 	int ret;
5981 
5982 	for (iter = &dev->adj_list.lower,
5983 	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
5984 	     ldev;
5985 	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
5986 		/* first is the lower device itself */
5987 		ret = fn(ldev, data);
5988 		if (ret)
5989 			return ret;
5990 
5991 		/* then look at all of its lower devices */
5992 		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
5993 		if (ret)
5994 			return ret;
5995 	}
5996 
5997 	return 0;
5998 }
5999 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6000 
6001 /**
6002  * netdev_lower_get_first_private_rcu - Get the first ->private from the
6003  *				       lower neighbour list, RCU
6004  *				       variant
6005  * @dev: device
6006  *
6007  * Gets the first netdev_adjacent->private from the dev's lower neighbour
6008  * list. The caller must hold RCU read lock.
6009  */
6010 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6011 {
6012 	struct netdev_adjacent *lower;
6013 
6014 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
6015 			struct netdev_adjacent, list);
6016 	if (lower)
6017 		return lower->private;
6018 	return NULL;
6019 }
6020 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6021 
6022 /**
6023  * netdev_master_upper_dev_get_rcu - Get master upper device
6024  * @dev: device
6025  *
6026  * Find a master upper device and return pointer to it or NULL in case
6027  * it's not there. The caller must hold the RCU read lock.
6028  */
6029 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6030 {
6031 	struct netdev_adjacent *upper;
6032 
6033 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
6034 				       struct netdev_adjacent, list);
6035 	if (upper && likely(upper->master))
6036 		return upper->dev;
6037 	return NULL;
6038 }
6039 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
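
/* Example (editor's illustrative sketch, not part of the original file):
 * the RCU variant is meant for paths that cannot take the RTNL lock; the
 * returned pointer may only be used inside the read-side critical section.
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(dev);
 *	if (master)
 *		... use master while still under rcu_read_lock() ...
 *	rcu_read_unlock();
 */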
6040 
6041 static int netdev_adjacent_sysfs_add(struct net_device *dev,
6042 			      struct net_device *adj_dev,
6043 			      struct list_head *dev_list)
6044 {
6045 	char linkname[IFNAMSIZ+7];
6046 
6047 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
6048 		"upper_%s" : "lower_%s", adj_dev->name);
6049 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6050 				 linkname);
6051 }
6052 static void netdev_adjacent_sysfs_del(struct net_device *dev,
6053 			       char *name,
6054 			       struct list_head *dev_list)
6055 {
6056 	char linkname[IFNAMSIZ+7];
6057 
6058 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
6059 		"upper_%s" : "lower_%s", name);
6060 	sysfs_remove_link(&(dev->dev.kobj), linkname);
6061 }
6062 
6063 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6064 						 struct net_device *adj_dev,
6065 						 struct list_head *dev_list)
6066 {
6067 	return (dev_list == &dev->adj_list.upper ||
6068 		dev_list == &dev->adj_list.lower) &&
6069 		net_eq(dev_net(dev), dev_net(adj_dev));
6070 }
6071 
6072 static int __netdev_adjacent_dev_insert(struct net_device *dev,
6073 					struct net_device *adj_dev,
6074 					struct list_head *dev_list,
6075 					void *private, bool master)
6076 {
6077 	struct netdev_adjacent *adj;
6078 	int ret;
6079 
6080 	adj = __netdev_find_adj(adj_dev, dev_list);
6081 
6082 	if (adj) {
6083 		adj->ref_nr += 1;
6084 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6085 			 dev->name, adj_dev->name, adj->ref_nr);
6086 
6087 		return 0;
6088 	}
6089 
6090 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6091 	if (!adj)
6092 		return -ENOMEM;
6093 
6094 	adj->dev = adj_dev;
6095 	adj->master = master;
6096 	adj->ref_nr = 1;
6097 	adj->private = private;
6098 	dev_hold(adj_dev);
6099 
6100 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6101 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
6102 
6103 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
6104 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
6105 		if (ret)
6106 			goto free_adj;
6107 	}
6108 
6109 	/* Ensure that master link is always the first item in list. */
6110 	if (master) {
6111 		ret = sysfs_create_link(&(dev->dev.kobj),
6112 					&(adj_dev->dev.kobj), "master");
6113 		if (ret)
6114 			goto remove_symlinks;
6115 
6116 		list_add_rcu(&adj->list, dev_list);
6117 	} else {
6118 		list_add_tail_rcu(&adj->list, dev_list);
6119 	}
6120 
6121 	return 0;
6122 
6123 remove_symlinks:
6124 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6125 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6126 free_adj:
6127 	kfree(adj);
6128 	dev_put(adj_dev);
6129 
6130 	return ret;
6131 }
6132 
6133 static void __netdev_adjacent_dev_remove(struct net_device *dev,
6134 					 struct net_device *adj_dev,
6135 					 u16 ref_nr,
6136 					 struct list_head *dev_list)
6137 {
6138 	struct netdev_adjacent *adj;
6139 
6140 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6141 		 dev->name, adj_dev->name, ref_nr);
6142 
6143 	adj = __netdev_find_adj(adj_dev, dev_list);
6144 
6145 	if (!adj) {
6146 		pr_err("Adjacency does not exist for device %s from %s\n",
6147 		       dev->name, adj_dev->name);
6148 		WARN_ON(1);
6149 		return;
6150 	}
6151 
6152 	if (adj->ref_nr > ref_nr) {
6153 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6154 			 dev->name, adj_dev->name, ref_nr,
6155 			 adj->ref_nr - ref_nr);
6156 		adj->ref_nr -= ref_nr;
6157 		return;
6158 	}
6159 
6160 	if (adj->master)
6161 		sysfs_remove_link(&(dev->dev.kobj), "master");
6162 
6163 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6164 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6165 
6166 	list_del_rcu(&adj->list);
6167 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6168 		 adj_dev->name, dev->name, adj_dev->name);
6169 	dev_put(adj_dev);
6170 	kfree_rcu(adj, rcu);
6171 }
6172 
6173 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6174 					    struct net_device *upper_dev,
6175 					    struct list_head *up_list,
6176 					    struct list_head *down_list,
6177 					    void *private, bool master)
6178 {
6179 	int ret;
6180 
6181 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6182 					   private, master);
6183 	if (ret)
6184 		return ret;
6185 
6186 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
6187 					   private, false);
6188 	if (ret) {
6189 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
6190 		return ret;
6191 	}
6192 
6193 	return 0;
6194 }
6195 
6196 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6197 					       struct net_device *upper_dev,
6198 					       u16 ref_nr,
6199 					       struct list_head *up_list,
6200 					       struct list_head *down_list)
6201 {
6202 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6203 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
6204 }
6205 
6206 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6207 						struct net_device *upper_dev,
6208 						void *private, bool master)
6209 {
6210 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6211 						&dev->adj_list.upper,
6212 						&upper_dev->adj_list.lower,
6213 						private, master);
6214 }
6215 
6216 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6217 						   struct net_device *upper_dev)
6218 {
6219 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
6220 					   &dev->adj_list.upper,
6221 					   &upper_dev->adj_list.lower);
6222 }
6223 
6224 static int __netdev_upper_dev_link(struct net_device *dev,
6225 				   struct net_device *upper_dev, bool master,
6226 				   void *upper_priv, void *upper_info)
6227 {
6228 	struct netdev_notifier_changeupper_info changeupper_info;
6229 	int ret = 0;
6230 
6231 	ASSERT_RTNL();
6232 
6233 	if (dev == upper_dev)
6234 		return -EBUSY;
6235 
6236 	/* To prevent loops, check that dev is not already an upper device of upper_dev. */
6237 	if (netdev_has_upper_dev(upper_dev, dev))
6238 		return -EBUSY;
6239 
6240 	if (netdev_has_upper_dev(dev, upper_dev))
6241 		return -EEXIST;
6242 
6243 	if (master && netdev_master_upper_dev_get(dev))
6244 		return -EBUSY;
6245 
6246 	changeupper_info.upper_dev = upper_dev;
6247 	changeupper_info.master = master;
6248 	changeupper_info.linking = true;
6249 	changeupper_info.upper_info = upper_info;
6250 
6251 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
6252 					    &changeupper_info.info);
6253 	ret = notifier_to_errno(ret);
6254 	if (ret)
6255 		return ret;
6256 
6257 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
6258 						   master);
6259 	if (ret)
6260 		return ret;
6261 
6262 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
6263 					    &changeupper_info.info);
6264 	ret = notifier_to_errno(ret);
6265 	if (ret)
6266 		goto rollback;
6267 
6268 	return 0;
6269 
6270 rollback:
6271 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6272 
6273 	return ret;
6274 }
6275 
6276 /**
6277  * netdev_upper_dev_link - Add a link to the upper device
6278  * @dev: device
6279  * @upper_dev: new upper device
6280  *
6281  * Adds a link to a device which is upper to this one. The caller must hold
6282  * the RTNL lock. On a failure a negative errno code is returned.
6283  * On success the reference counts are adjusted and the function
6284  * returns zero.
6285  */
6286 int netdev_upper_dev_link(struct net_device *dev,
6287 			  struct net_device *upper_dev)
6288 {
6289 	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
6290 }
6291 EXPORT_SYMBOL(netdev_upper_dev_link);
6292 
6293 /**
6294  * netdev_master_upper_dev_link - Add a master link to the upper device
6295  * @dev: device
6296  * @upper_dev: new upper device
6297  * @upper_priv: upper device private
6298  * @upper_info: upper info to be passed down via notifier
6299  *
6300  * Adds a link to a device which is upper to this one. In this case, only
6301  * one master upper device can be linked, although other non-master devices
6302  * might be linked as well. The caller must hold the RTNL lock.
6303  * On a failure a negative errno code is returned. On success the reference
6304  * counts are adjusted and the function returns zero.
6305  */
6306 int netdev_master_upper_dev_link(struct net_device *dev,
6307 				 struct net_device *upper_dev,
6308 				 void *upper_priv, void *upper_info)
6309 {
6310 	return __netdev_upper_dev_link(dev, upper_dev, true,
6311 				       upper_priv, upper_info);
6312 }
6313 EXPORT_SYMBOL(netdev_master_upper_dev_link);
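
/* Example (editor's illustrative sketch, not part of the original file):
 * how a bonding/bridge style master might link a new slave underneath
 * itself.  "example_enslave" is a hypothetical name; real callers pass
 * their own upper_priv/upper_info instead of NULL.
 *
 *	static int example_enslave(struct net_device *master_dev,
 *				   struct net_device *slave_dev)
 *	{
 *		int err;
 *
 *		ASSERT_RTNL();
 *		err = netdev_master_upper_dev_link(slave_dev, master_dev,
 *						   NULL, NULL);
 *		if (err)
 *			return err;
 *
 *		... configure the slave; on teardown call
 *		netdev_upper_dev_unlink(slave_dev, master_dev) ...
 *
 *		return 0;
 *	}
 */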
6314 
6315 /**
6316  * netdev_upper_dev_unlink - Removes a link to upper device
6317  * @dev: device
6318  * @upper_dev: upper device to remove the link to
6319  *
6320  * Removes a link to a device which is upper to this one. The caller must hold
6321  * the RTNL lock.
6322  */
6323 void netdev_upper_dev_unlink(struct net_device *dev,
6324 			     struct net_device *upper_dev)
6325 {
6326 	struct netdev_notifier_changeupper_info changeupper_info;
6327 
6328 	ASSERT_RTNL();
6329 
6330 	changeupper_info.upper_dev = upper_dev;
6331 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
6332 	changeupper_info.linking = false;
6333 
6334 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
6335 				      &changeupper_info.info);
6336 
6337 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6338 
6339 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
6340 				      &changeupper_info.info);
6341 }
6342 EXPORT_SYMBOL(netdev_upper_dev_unlink);
6343 
6344 /**
6345  * netdev_bonding_info_change - Dispatch event about slave change
6346  * @dev: device
6347  * @bonding_info: info to dispatch
6348  *
6349  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6350  * The caller must hold the RTNL lock.
6351  */
6352 void netdev_bonding_info_change(struct net_device *dev,
6353 				struct netdev_bonding_info *bonding_info)
6354 {
6355 	struct netdev_notifier_bonding_info	info;
6356 
6357 	memcpy(&info.bonding_info, bonding_info,
6358 	       sizeof(struct netdev_bonding_info));
6359 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
6360 				      &info.info);
6361 }
6362 EXPORT_SYMBOL(netdev_bonding_info_change);
6363 
6364 static void netdev_adjacent_add_links(struct net_device *dev)
6365 {
6366 	struct netdev_adjacent *iter;
6367 
6368 	struct net *net = dev_net(dev);
6369 
6370 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
6371 		if (!net_eq(net, dev_net(iter->dev)))
6372 			continue;
6373 		netdev_adjacent_sysfs_add(iter->dev, dev,
6374 					  &iter->dev->adj_list.lower);
6375 		netdev_adjacent_sysfs_add(dev, iter->dev,
6376 					  &dev->adj_list.upper);
6377 	}
6378 
6379 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
6380 		if (!net_eq(net, dev_net(iter->dev)))
6381 			continue;
6382 		netdev_adjacent_sysfs_add(iter->dev, dev,
6383 					  &iter->dev->adj_list.upper);
6384 		netdev_adjacent_sysfs_add(dev, iter->dev,
6385 					  &dev->adj_list.lower);
6386 	}
6387 }
6388 
6389 static void netdev_adjacent_del_links(struct net_device *dev)
6390 {
6391 	struct netdev_adjacent *iter;
6392 
6393 	struct net *net = dev_net(dev);
6394 
6395 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
6396 		if (!net_eq(net, dev_net(iter->dev)))
6397 			continue;
6398 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
6399 					  &iter->dev->adj_list.lower);
6400 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
6401 					  &dev->adj_list.upper);
6402 	}
6403 
6404 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
6405 		if (!net_eq(net, dev_net(iter->dev)))
6406 			continue;
6407 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
6408 					  &iter->dev->adj_list.upper);
6409 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
6410 					  &dev->adj_list.lower);
6411 	}
6412 }
6413 
6414 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
6415 {
6416 	struct netdev_adjacent *iter;
6417 
6418 	struct net *net = dev_net(dev);
6419 
6420 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
6421 		if (!net_eq(net, dev_net(iter->dev)))
6422 			continue;
6423 		netdev_adjacent_sysfs_del(iter->dev, oldname,
6424 					  &iter->dev->adj_list.lower);
6425 		netdev_adjacent_sysfs_add(iter->dev, dev,
6426 					  &iter->dev->adj_list.lower);
6427 	}
6428 
6429 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
6430 		if (!net_eq(net, dev_net(iter->dev)))
6431 			continue;
6432 		netdev_adjacent_sysfs_del(iter->dev, oldname,
6433 					  &iter->dev->adj_list.upper);
6434 		netdev_adjacent_sysfs_add(iter->dev, dev,
6435 					  &iter->dev->adj_list.upper);
6436 	}
6437 }
6438 
6439 void *netdev_lower_dev_get_private(struct net_device *dev,
6440 				   struct net_device *lower_dev)
6441 {
6442 	struct netdev_adjacent *lower;
6443 
6444 	if (!lower_dev)
6445 		return NULL;
6446 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
6447 	if (!lower)
6448 		return NULL;
6449 
6450 	return lower->private;
6451 }
6452 EXPORT_SYMBOL(netdev_lower_dev_get_private);
6453 
6454 
6455 int dev_get_nest_level(struct net_device *dev)
6456 {
6457 	struct net_device *lower = NULL;
6458 	struct list_head *iter;
6459 	int max_nest = -1;
6460 	int nest;
6461 
6462 	ASSERT_RTNL();
6463 
6464 	netdev_for_each_lower_dev(dev, lower, iter) {
6465 		nest = dev_get_nest_level(lower);
6466 		if (max_nest < nest)
6467 			max_nest = nest;
6468 	}
6469 
6470 	return max_nest + 1;
6471 }
6472 EXPORT_SYMBOL(dev_get_nest_level);
6473 
6474 /**
6475  * netdev_lower_state_changed - Dispatch event about lower device state change
6476  * @lower_dev: device
6477  * @lower_state_info: state to dispatch
6478  *
6479  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6480  * The caller must hold the RTNL lock.
6481  */
6482 void netdev_lower_state_changed(struct net_device *lower_dev,
6483 				void *lower_state_info)
6484 {
6485 	struct netdev_notifier_changelowerstate_info changelowerstate_info;
6486 
6487 	ASSERT_RTNL();
6488 	changelowerstate_info.lower_state_info = lower_state_info;
6489 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
6490 				      &changelowerstate_info.info);
6491 }
6492 EXPORT_SYMBOL(netdev_lower_state_changed);
6493 
6494 static void dev_change_rx_flags(struct net_device *dev, int flags)
6495 {
6496 	const struct net_device_ops *ops = dev->netdev_ops;
6497 
6498 	if (ops->ndo_change_rx_flags)
6499 		ops->ndo_change_rx_flags(dev, flags);
6500 }
6501 
6502 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
6503 {
6504 	unsigned int old_flags = dev->flags;
6505 	kuid_t uid;
6506 	kgid_t gid;
6507 
6508 	ASSERT_RTNL();
6509 
6510 	dev->flags |= IFF_PROMISC;
6511 	dev->promiscuity += inc;
6512 	if (dev->promiscuity == 0) {
6513 		/*
6514 		 * Avoid overflow.
6515 		 * If inc causes an overflow, leave promisc untouched and return an error.
6516 		 */
6517 		if (inc < 0)
6518 			dev->flags &= ~IFF_PROMISC;
6519 		else {
6520 			dev->promiscuity -= inc;
6521 			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6522 				dev->name);
6523 			return -EOVERFLOW;
6524 		}
6525 	}
6526 	if (dev->flags != old_flags) {
6527 		pr_info("device %s %s promiscuous mode\n",
6528 			dev->name,
6529 			dev->flags & IFF_PROMISC ? "entered" : "left");
6530 		if (audit_enabled) {
6531 			current_uid_gid(&uid, &gid);
6532 			audit_log(current->audit_context, GFP_ATOMIC,
6533 				AUDIT_ANOM_PROMISCUOUS,
6534 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6535 				dev->name, (dev->flags & IFF_PROMISC),
6536 				(old_flags & IFF_PROMISC),
6537 				from_kuid(&init_user_ns, audit_get_loginuid(current)),
6538 				from_kuid(&init_user_ns, uid),
6539 				from_kgid(&init_user_ns, gid),
6540 				audit_get_sessionid(current));
6541 		}
6542 
6543 		dev_change_rx_flags(dev, IFF_PROMISC);
6544 	}
6545 	if (notify)
6546 		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
6547 	return 0;
6548 }
6549 
6550 /**
6551  *	dev_set_promiscuity	- update promiscuity count on a device
6552  *	@dev: device
6553  *	@inc: modifier
6554  *
6555  *	Add or remove promiscuity from a device. While the count in the device
6556  *	remains above zero the interface remains promiscuous. Once it hits zero
6557  *	the device reverts back to normal filtering operation. A negative inc
6558  *	value is used to drop promiscuity on the device.
6559  *	Return 0 if successful or a negative errno code on error.
6560  */
6561 int dev_set_promiscuity(struct net_device *dev, int inc)
6562 {
6563 	unsigned int old_flags = dev->flags;
6564 	int err;
6565 
6566 	err = __dev_set_promiscuity(dev, inc, true);
6567 	if (err < 0)
6568 		return err;
6569 	if (dev->flags != old_flags)
6570 		dev_set_rx_mode(dev);
6571 	return err;
6572 }
6573 EXPORT_SYMBOL(dev_set_promiscuity);
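
/* Example (editor's illustrative sketch, not part of the original file):
 * a packet-capture style user takes one promiscuity reference while
 * capturing and drops it again when done, both under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	... capture traffic ...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */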
6574 
6575 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
6576 {
6577 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
6578 
6579 	ASSERT_RTNL();
6580 
6581 	dev->flags |= IFF_ALLMULTI;
6582 	dev->allmulti += inc;
6583 	if (dev->allmulti == 0) {
6584 		/*
6585 		 * Avoid overflow.
6586 		 * If inc causes overflow, untouch allmulti and return error.
6587 		 */
6588 		if (inc < 0)
6589 			dev->flags &= ~IFF_ALLMULTI;
6590 		else {
6591 			dev->allmulti -= inc;
6592 			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6593 				dev->name);
6594 			return -EOVERFLOW;
6595 		}
6596 	}
6597 	if (dev->flags ^ old_flags) {
6598 		dev_change_rx_flags(dev, IFF_ALLMULTI);
6599 		dev_set_rx_mode(dev);
6600 		if (notify)
6601 			__dev_notify_flags(dev, old_flags,
6602 					   dev->gflags ^ old_gflags);
6603 	}
6604 	return 0;
6605 }
6606 
6607 /**
6608  *	dev_set_allmulti	- update allmulti count on a device
6609  *	@dev: device
6610  *	@inc: modifier
6611  *
6612  *	Add or remove reception of all multicast frames to a device. While the
6613  *	count in the device remains above zero the interface remains listening
6614  *	to all interfaces. Once it hits zero the device reverts back to normal
6615  *	filtering operation. A negative @inc value is used to drop the counter
6616  *	when releasing a resource needing all multicasts.
6617  *	Return 0 if successful or a negative errno code on error.
6618  */
6619 
6620 int dev_set_allmulti(struct net_device *dev, int inc)
6621 {
6622 	return __dev_set_allmulti(dev, inc, true);
6623 }
6624 EXPORT_SYMBOL(dev_set_allmulti);
6625 
6626 /*
6627  *	Upload unicast and multicast address lists to device and
6628  *	configure RX filtering. When the device doesn't support unicast
6629  *	filtering it is put in promiscuous mode while unicast addresses
6630  *	are present.
6631  */
6632 void __dev_set_rx_mode(struct net_device *dev)
6633 {
6634 	const struct net_device_ops *ops = dev->netdev_ops;
6635 
6636 	/* dev_open will call this function so the list will stay sane. */
6637 	if (!(dev->flags&IFF_UP))
6638 		return;
6639 
6640 	if (!netif_device_present(dev))
6641 		return;
6642 
6643 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
6644 		/* Unicast address changes may only happen under the rtnl,
6645 		 * therefore calling __dev_set_promiscuity here is safe.
6646 		 */
6647 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
6648 			__dev_set_promiscuity(dev, 1, false);
6649 			dev->uc_promisc = true;
6650 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
6651 			__dev_set_promiscuity(dev, -1, false);
6652 			dev->uc_promisc = false;
6653 		}
6654 	}
6655 
6656 	if (ops->ndo_set_rx_mode)
6657 		ops->ndo_set_rx_mode(dev);
6658 }
6659 
6660 void dev_set_rx_mode(struct net_device *dev)
6661 {
6662 	netif_addr_lock_bh(dev);
6663 	__dev_set_rx_mode(dev);
6664 	netif_addr_unlock_bh(dev);
6665 }
6666 
6667 /**
6668  *	dev_get_flags - get flags reported to userspace
6669  *	@dev: device
6670  *
6671  *	Get the combination of flag bits exported through APIs to userspace.
6672  */
6673 unsigned int dev_get_flags(const struct net_device *dev)
6674 {
6675 	unsigned int flags;
6676 
6677 	flags = (dev->flags & ~(IFF_PROMISC |
6678 				IFF_ALLMULTI |
6679 				IFF_RUNNING |
6680 				IFF_LOWER_UP |
6681 				IFF_DORMANT)) |
6682 		(dev->gflags & (IFF_PROMISC |
6683 				IFF_ALLMULTI));
6684 
6685 	if (netif_running(dev)) {
6686 		if (netif_oper_up(dev))
6687 			flags |= IFF_RUNNING;
6688 		if (netif_carrier_ok(dev))
6689 			flags |= IFF_LOWER_UP;
6690 		if (netif_dormant(dev))
6691 			flags |= IFF_DORMANT;
6692 	}
6693 
6694 	return flags;
6695 }
6696 EXPORT_SYMBOL(dev_get_flags);
6697 
6698 int __dev_change_flags(struct net_device *dev, unsigned int flags)
6699 {
6700 	unsigned int old_flags = dev->flags;
6701 	int ret;
6702 
6703 	ASSERT_RTNL();
6704 
6705 	/*
6706 	 *	Set the flags on our device.
6707 	 */
6708 
6709 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6710 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6711 			       IFF_AUTOMEDIA)) |
6712 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6713 				    IFF_ALLMULTI));
6714 
6715 	/*
6716 	 *	Load in the correct multicast list now the flags have changed.
6717 	 */
6718 
6719 	if ((old_flags ^ flags) & IFF_MULTICAST)
6720 		dev_change_rx_flags(dev, IFF_MULTICAST);
6721 
6722 	dev_set_rx_mode(dev);
6723 
6724 	/*
6725 	 *	Have we downed the interface. We handle IFF_UP ourselves
6726 	 *	according to user attempts to set it, rather than blindly
6727 	 *	setting it.
6728 	 */
6729 
6730 	ret = 0;
6731 	if ((old_flags ^ flags) & IFF_UP) {
6732 		if (old_flags & IFF_UP)
6733 			__dev_close(dev);
6734 		else
6735 			ret = __dev_open(dev);
6736 	}
6737 
6738 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
6739 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
6740 		unsigned int old_flags = dev->flags;
6741 
6742 		dev->gflags ^= IFF_PROMISC;
6743 
6744 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
6745 			if (dev->flags != old_flags)
6746 				dev_set_rx_mode(dev);
6747 	}
6748 
6749 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6750 	 * is important. Some (broken) drivers set IFF_PROMISC when
6751 	 * IFF_ALLMULTI is requested, without asking us and without reporting.
6752 	 */
6753 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
6754 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6755 
6756 		dev->gflags ^= IFF_ALLMULTI;
6757 		__dev_set_allmulti(dev, inc, false);
6758 	}
6759 
6760 	return ret;
6761 }
6762 
6763 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6764 			unsigned int gchanges)
6765 {
6766 	unsigned int changes = dev->flags ^ old_flags;
6767 
6768 	if (gchanges)
6769 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
6770 
6771 	if (changes & IFF_UP) {
6772 		if (dev->flags & IFF_UP)
6773 			call_netdevice_notifiers(NETDEV_UP, dev);
6774 		else
6775 			call_netdevice_notifiers(NETDEV_DOWN, dev);
6776 	}
6777 
6778 	if (dev->flags & IFF_UP &&
6779 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6780 		struct netdev_notifier_change_info change_info;
6781 
6782 		change_info.flags_changed = changes;
6783 		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6784 					      &change_info.info);
6785 	}
6786 }
6787 
6788 /**
6789  *	dev_change_flags - change device settings
6790  *	@dev: device
6791  *	@flags: device state flags
6792  *
6793  *	Change settings on device based state flags. The flags are
6794  *	in the userspace exported format.
6795  */
6796 int dev_change_flags(struct net_device *dev, unsigned int flags)
6797 {
6798 	int ret;
6799 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
6800 
6801 	ret = __dev_change_flags(dev, flags);
6802 	if (ret < 0)
6803 		return ret;
6804 
6805 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
6806 	__dev_notify_flags(dev, old_flags, changes);
6807 	return ret;
6808 }
6809 EXPORT_SYMBOL(dev_change_flags);
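
/* Example (editor's illustrative sketch, not part of the original file):
 * bringing an interface up the way the SIOCSIFFLAGS path does, by OR-ing
 * IFF_UP into the flags reported to userspace.  Must run under the RTNL
 * lock.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */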
6810 
6811 int __dev_set_mtu(struct net_device *dev, int new_mtu)
6812 {
6813 	const struct net_device_ops *ops = dev->netdev_ops;
6814 
6815 	if (ops->ndo_change_mtu)
6816 		return ops->ndo_change_mtu(dev, new_mtu);
6817 
6818 	dev->mtu = new_mtu;
6819 	return 0;
6820 }
6821 EXPORT_SYMBOL(__dev_set_mtu);
6822 
6823 /**
6824  *	dev_set_mtu - Change maximum transfer unit
6825  *	@dev: device
6826  *	@new_mtu: new transfer unit
6827  *
6828  *	Change the maximum transfer size of the network device.
6829  */
6830 int dev_set_mtu(struct net_device *dev, int new_mtu)
6831 {
6832 	int err, orig_mtu;
6833 
6834 	if (new_mtu == dev->mtu)
6835 		return 0;
6836 
6837 	/* MTU must be positive, and in range */
6838 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6839 		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6840 				    dev->name, new_mtu, dev->min_mtu);
6841 		return -EINVAL;
6842 	}
6843 
6844 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6845 		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
6846 				    dev->name, new_mtu, dev->max_mtu);
6847 		return -EINVAL;
6848 	}
6849 
6850 	if (!netif_device_present(dev))
6851 		return -ENODEV;
6852 
6853 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6854 	err = notifier_to_errno(err);
6855 	if (err)
6856 		return err;
6857 
6858 	orig_mtu = dev->mtu;
6859 	err = __dev_set_mtu(dev, new_mtu);
6860 
6861 	if (!err) {
6862 		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6863 		err = notifier_to_errno(err);
6864 		if (err) {
6865 			/* setting mtu back and notifying everyone again,
6866 			 * so that they have a chance to revert changes.
6867 			 */
6868 			__dev_set_mtu(dev, orig_mtu);
6869 			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6870 		}
6871 	}
6872 	return err;
6873 }
6874 EXPORT_SYMBOL(dev_set_mtu);
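
/* Example (editor's illustrative sketch, not part of the original file):
 * requesting a jumbo MTU from a control path.  dev_set_mtu() checks the
 * value against dev->min_mtu/dev->max_mtu and rolls the change back if a
 * NETDEV_CHANGEMTU notifier rejects it.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */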
6875 
6876 /**
6877  *	dev_set_group - Change group this device belongs to
6878  *	@dev: device
6879  *	@new_group: group this device should belong to
6880  */
6881 void dev_set_group(struct net_device *dev, int new_group)
6882 {
6883 	dev->group = new_group;
6884 }
6885 EXPORT_SYMBOL(dev_set_group);
6886 
6887 /**
6888  *	dev_set_mac_address - Change Media Access Control Address
6889  *	@dev: device
6890  *	@sa: new address
6891  *
6892  *	Change the hardware (MAC) address of the device
6893  */
6894 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6895 {
6896 	const struct net_device_ops *ops = dev->netdev_ops;
6897 	int err;
6898 
6899 	if (!ops->ndo_set_mac_address)
6900 		return -EOPNOTSUPP;
6901 	if (sa->sa_family != dev->type)
6902 		return -EINVAL;
6903 	if (!netif_device_present(dev))
6904 		return -ENODEV;
6905 	err = ops->ndo_set_mac_address(dev, sa);
6906 	if (err)
6907 		return err;
6908 	dev->addr_assign_type = NET_ADDR_SET;
6909 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
6910 	add_device_randomness(dev->dev_addr, dev->addr_len);
6911 	return 0;
6912 }
6913 EXPORT_SYMBOL(dev_set_mac_address);
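
/* Example (editor's illustrative sketch, not part of the original file):
 * setting a new hardware address.  sa_family must match dev->type and the
 * buffer must hold dev->addr_len bytes; "new_addr" is a hypothetical
 * source buffer.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */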
6914 
6915 /**
6916  *	dev_change_carrier - Change device carrier
6917  *	@dev: device
6918  *	@new_carrier: new value
6919  *
6920  *	Change device carrier
6921  */
6922 int dev_change_carrier(struct net_device *dev, bool new_carrier)
6923 {
6924 	const struct net_device_ops *ops = dev->netdev_ops;
6925 
6926 	if (!ops->ndo_change_carrier)
6927 		return -EOPNOTSUPP;
6928 	if (!netif_device_present(dev))
6929 		return -ENODEV;
6930 	return ops->ndo_change_carrier(dev, new_carrier);
6931 }
6932 EXPORT_SYMBOL(dev_change_carrier);
6933 
6934 /**
6935  *	dev_get_phys_port_id - Get device physical port ID
6936  *	@dev: device
6937  *	@ppid: port ID
6938  *
6939  *	Get device physical port ID
6940  */
6941 int dev_get_phys_port_id(struct net_device *dev,
6942 			 struct netdev_phys_item_id *ppid)
6943 {
6944 	const struct net_device_ops *ops = dev->netdev_ops;
6945 
6946 	if (!ops->ndo_get_phys_port_id)
6947 		return -EOPNOTSUPP;
6948 	return ops->ndo_get_phys_port_id(dev, ppid);
6949 }
6950 EXPORT_SYMBOL(dev_get_phys_port_id);
6951 
6952 /**
6953  *	dev_get_phys_port_name - Get device physical port name
6954  *	@dev: device
6955  *	@name: port name
6956  *	@len: limit of bytes to copy to name
6957  *
6958  *	Get device physical port name
6959  */
6960 int dev_get_phys_port_name(struct net_device *dev,
6961 			   char *name, size_t len)
6962 {
6963 	const struct net_device_ops *ops = dev->netdev_ops;
6964 
6965 	if (!ops->ndo_get_phys_port_name)
6966 		return -EOPNOTSUPP;
6967 	return ops->ndo_get_phys_port_name(dev, name, len);
6968 }
6969 EXPORT_SYMBOL(dev_get_phys_port_name);
6970 
6971 /**
6972  *	dev_change_proto_down - update protocol port state information
6973  *	@dev: device
6974  *	@proto_down: new value
6975  *
6976  *	This info can be used by switch drivers to set the phys state of the
6977  *	port.
6978  */
6979 int dev_change_proto_down(struct net_device *dev, bool proto_down)
6980 {
6981 	const struct net_device_ops *ops = dev->netdev_ops;
6982 
6983 	if (!ops->ndo_change_proto_down)
6984 		return -EOPNOTSUPP;
6985 	if (!netif_device_present(dev))
6986 		return -ENODEV;
6987 	return ops->ndo_change_proto_down(dev, proto_down);
6988 }
6989 EXPORT_SYMBOL(dev_change_proto_down);
6990 
6991 u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id)
6992 {
6993 	struct netdev_xdp xdp;
6994 
6995 	memset(&xdp, 0, sizeof(xdp));
6996 	xdp.command = XDP_QUERY_PROG;
6997 
6998 	/* Query must always succeed. */
6999 	WARN_ON(xdp_op(dev, &xdp) < 0);
7000 	if (prog_id)
7001 		*prog_id = xdp.prog_id;
7002 
7003 	return xdp.prog_attached;
7004 }
7005 
7006 static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
7007 			   struct netlink_ext_ack *extack, u32 flags,
7008 			   struct bpf_prog *prog)
7009 {
7010 	struct netdev_xdp xdp;
7011 
7012 	memset(&xdp, 0, sizeof(xdp));
7013 	if (flags & XDP_FLAGS_HW_MODE)
7014 		xdp.command = XDP_SETUP_PROG_HW;
7015 	else
7016 		xdp.command = XDP_SETUP_PROG;
7017 	xdp.extack = extack;
7018 	xdp.flags = flags;
7019 	xdp.prog = prog;
7020 
7021 	return xdp_op(dev, &xdp);
7022 }
7023 
7024 /**
7025  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
7026  *	@dev: device
7027  *	@extack: netlink extended ack
7028  *	@fd: new program fd or negative value to clear
7029  *	@flags: xdp-related flags
7030  *
7031  *	Set or clear a bpf program for a device
7032  */
7033 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7034 		      int fd, u32 flags)
7035 {
7036 	const struct net_device_ops *ops = dev->netdev_ops;
7037 	struct bpf_prog *prog = NULL;
7038 	xdp_op_t xdp_op, xdp_chk;
7039 	int err;
7040 
7041 	ASSERT_RTNL();
7042 
7043 	xdp_op = xdp_chk = ops->ndo_xdp;
7044 	if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
7045 		return -EOPNOTSUPP;
7046 	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
7047 		xdp_op = generic_xdp_install;
7048 	if (xdp_op == xdp_chk)
7049 		xdp_chk = generic_xdp_install;
7050 
7051 	if (fd >= 0) {
7052 		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL))
7053 			return -EEXIST;
7054 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
7055 		    __dev_xdp_attached(dev, xdp_op, NULL))
7056 			return -EBUSY;
7057 
7058 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
7059 		if (IS_ERR(prog))
7060 			return PTR_ERR(prog);
7061 	}
7062 
7063 	err = dev_xdp_install(dev, xdp_op, extack, flags, prog);
7064 	if (err < 0 && prog)
7065 		bpf_prog_put(prog);
7066 
7067 	return err;
7068 }
7069 
7070 /**
7071  *	dev_new_index	-	allocate an ifindex
7072  *	@net: the applicable net namespace
7073  *
7074  *	Returns a suitable unique value for a new device interface
7075  *	number.  The caller must hold the rtnl semaphore or the
7076  *	dev_base_lock to be sure it remains unique.
7077  */
7078 static int dev_new_index(struct net *net)
7079 {
7080 	int ifindex = net->ifindex;
7081 
7082 	for (;;) {
7083 		if (++ifindex <= 0)
7084 			ifindex = 1;
7085 		if (!__dev_get_by_index(net, ifindex))
7086 			return net->ifindex = ifindex;
7087 	}
7088 }
7089 
7090 /* Delayed registration/unregistration */
7091 static LIST_HEAD(net_todo_list);
7092 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
7093 
7094 static void net_set_todo(struct net_device *dev)
7095 {
7096 	list_add_tail(&dev->todo_list, &net_todo_list);
7097 	dev_net(dev)->dev_unreg_count++;
7098 }
7099 
7100 static void rollback_registered_many(struct list_head *head)
7101 {
7102 	struct net_device *dev, *tmp;
7103 	LIST_HEAD(close_head);
7104 
7105 	BUG_ON(dev_boot_phase);
7106 	ASSERT_RTNL();
7107 
7108 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
7109 		/* Some devices call this without ever having been
7110 		 * registered, as part of initialization unwind. Remove
7111 		 * those devices and proceed with the remaining ones.
7112 		 */
7113 		if (dev->reg_state == NETREG_UNINITIALIZED) {
7114 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7115 				 dev->name, dev);
7116 
7117 			WARN_ON(1);
7118 			list_del(&dev->unreg_list);
7119 			continue;
7120 		}
7121 		dev->dismantle = true;
7122 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
7123 	}
7124 
7125 	/* If device is running, close it first. */
7126 	list_for_each_entry(dev, head, unreg_list)
7127 		list_add_tail(&dev->close_list, &close_head);
7128 	dev_close_many(&close_head, true);
7129 
7130 	list_for_each_entry(dev, head, unreg_list) {
7131 		/* And unlink it from device chain. */
7132 		unlist_netdevice(dev);
7133 
7134 		dev->reg_state = NETREG_UNREGISTERING;
7135 	}
7136 	flush_all_backlogs();
7137 
7138 	synchronize_net();
7139 
7140 	list_for_each_entry(dev, head, unreg_list) {
7141 		struct sk_buff *skb = NULL;
7142 
7143 		/* Shutdown queueing discipline. */
7144 		dev_shutdown(dev);
7145 
7146 
7147 		/* Notify protocols, that we are about to destroy
7148 		 * this device. They should clean all the things.
7149 		 */
7150 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7151 
7152 		if (!dev->rtnl_link_ops ||
7153 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7154 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
7155 						     GFP_KERNEL);
7156 
7157 		/*
7158 		 *	Flush the unicast and multicast chains
7159 		 */
7160 		dev_uc_flush(dev);
7161 		dev_mc_flush(dev);
7162 
7163 		if (dev->netdev_ops->ndo_uninit)
7164 			dev->netdev_ops->ndo_uninit(dev);
7165 
7166 		if (skb)
7167 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
7168 
7169 		/* The notifier chain MUST have detached all upper devices from us. */
7170 		WARN_ON(netdev_has_any_upper_dev(dev));
7171 		WARN_ON(netdev_has_any_lower_dev(dev));
7172 
7173 		/* Remove entries from kobject tree */
7174 		netdev_unregister_kobject(dev);
7175 #ifdef CONFIG_XPS
7176 		/* Remove XPS queueing entries */
7177 		netif_reset_xps_queues_gt(dev, 0);
7178 #endif
7179 	}
7180 
7181 	synchronize_net();
7182 
7183 	list_for_each_entry(dev, head, unreg_list)
7184 		dev_put(dev);
7185 }
7186 
7187 static void rollback_registered(struct net_device *dev)
7188 {
7189 	LIST_HEAD(single);
7190 
7191 	list_add(&dev->unreg_list, &single);
7192 	rollback_registered_many(&single);
7193 	list_del(&single);
7194 }
7195 
7196 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7197 	struct net_device *upper, netdev_features_t features)
7198 {
7199 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7200 	netdev_features_t feature;
7201 	int feature_bit;
7202 
7203 	for_each_netdev_feature(&upper_disables, feature_bit) {
7204 		feature = __NETIF_F_BIT(feature_bit);
7205 		if (!(upper->wanted_features & feature)
7206 		    && (features & feature)) {
7207 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7208 				   &feature, upper->name);
7209 			features &= ~feature;
7210 		}
7211 	}
7212 
7213 	return features;
7214 }
7215 
7216 static void netdev_sync_lower_features(struct net_device *upper,
7217 	struct net_device *lower, netdev_features_t features)
7218 {
7219 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7220 	netdev_features_t feature;
7221 	int feature_bit;
7222 
7223 	for_each_netdev_feature(&upper_disables, feature_bit) {
7224 		feature = __NETIF_F_BIT(feature_bit);
7225 		if (!(features & feature) && (lower->features & feature)) {
7226 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7227 				   &feature, lower->name);
7228 			lower->wanted_features &= ~feature;
7229 			netdev_update_features(lower);
7230 
7231 			if (unlikely(lower->features & feature))
7232 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7233 					    &feature, lower->name);
7234 		}
7235 	}
7236 }
7237 
7238 static netdev_features_t netdev_fix_features(struct net_device *dev,
7239 	netdev_features_t features)
7240 {
7241 	/* Fix illegal checksum combinations */
7242 	if ((features & NETIF_F_HW_CSUM) &&
7243 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
7244 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
7245 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7246 	}
7247 
7248 	/* TSO requires that SG is present as well. */
7249 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
7250 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
7251 		features &= ~NETIF_F_ALL_TSO;
7252 	}
7253 
7254 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7255 					!(features & NETIF_F_IP_CSUM)) {
7256 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7257 		features &= ~NETIF_F_TSO;
7258 		features &= ~NETIF_F_TSO_ECN;
7259 	}
7260 
7261 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7262 					 !(features & NETIF_F_IPV6_CSUM)) {
7263 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7264 		features &= ~NETIF_F_TSO6;
7265 	}
7266 
7267 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7268 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7269 		features &= ~NETIF_F_TSO_MANGLEID;
7270 
7271 	/* TSO ECN requires that TSO is present as well. */
7272 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7273 		features &= ~NETIF_F_TSO_ECN;
7274 
7275 	/* Software GSO depends on SG. */
7276 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
7277 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
7278 		features &= ~NETIF_F_GSO;
7279 	}
7280 
7281 	/* GSO partial features require GSO partial be set */
7282 	if ((features & dev->gso_partial_features) &&
7283 	    !(features & NETIF_F_GSO_PARTIAL)) {
7284 		netdev_dbg(dev,
7285 			   "Dropping partially supported GSO features since no GSO partial.\n");
7286 		features &= ~dev->gso_partial_features;
7287 	}
7288 
7289 	return features;
7290 }
7291 
7292 int __netdev_update_features(struct net_device *dev)
7293 {
7294 	struct net_device *upper, *lower;
7295 	netdev_features_t features;
7296 	struct list_head *iter;
7297 	int err = -1;
7298 
7299 	ASSERT_RTNL();
7300 
7301 	features = netdev_get_wanted_features(dev);
7302 
7303 	if (dev->netdev_ops->ndo_fix_features)
7304 		features = dev->netdev_ops->ndo_fix_features(dev, features);
7305 
7306 	/* driver might be less strict about feature dependencies */
7307 	features = netdev_fix_features(dev, features);
7308 
7309 	/* some features can't be enabled if they're off on an upper device */
7310 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
7311 		features = netdev_sync_upper_features(dev, upper, features);
7312 
7313 	if (dev->features == features)
7314 		goto sync_lower;
7315 
7316 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7317 		&dev->features, &features);
7318 
7319 	if (dev->netdev_ops->ndo_set_features)
7320 		err = dev->netdev_ops->ndo_set_features(dev, features);
7321 	else
7322 		err = 0;
7323 
7324 	if (unlikely(err < 0)) {
7325 		netdev_err(dev,
7326 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
7327 			err, &features, &dev->features);
7328 		/* return non-0 since some features might have changed and
7329 		 * it's better to fire a spurious notification than miss it
7330 		 */
7331 		return -1;
7332 	}
7333 
7334 sync_lower:
7335 	/* some features must be disabled on lower devices when disabled
7336 	 * on an upper device (think: bonding master or bridge)
7337 	 */
7338 	netdev_for_each_lower_dev(dev, lower, iter)
7339 		netdev_sync_lower_features(dev, lower, features);
7340 
7341 	if (!err) {
7342 		netdev_features_t diff = features ^ dev->features;
7343 
7344 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
7345 			/* udp_tunnel_{get,drop}_rx_info both need
7346 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
7347 			 * device, or they won't do anything.
7348 			 * Thus we need to update dev->features
7349 			 * *before* calling udp_tunnel_get_rx_info,
7350 			 * but *after* calling udp_tunnel_drop_rx_info.
7351 			 */
7352 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
7353 				dev->features = features;
7354 				udp_tunnel_get_rx_info(dev);
7355 			} else {
7356 				udp_tunnel_drop_rx_info(dev);
7357 			}
7358 		}
7359 
7360 		dev->features = features;
7361 	}
7362 
7363 	return err < 0 ? 0 : 1;
7364 }
7365 
7366 /**
7367  *	netdev_update_features - recalculate device features
7368  *	@dev: the device to check
7369  *
7370  *	Recalculate dev->features set and send notifications if it
7371  *	has changed. Should be called when driver- or hardware-dependent
7372  *	conditions that influence the features might have changed.
7373  */
7374 void netdev_update_features(struct net_device *dev)
7375 {
7376 	if (__netdev_update_features(dev))
7377 		netdev_features_change(dev);
7378 }
7379 EXPORT_SYMBOL(netdev_update_features);
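
/* Example (editor's illustrative sketch, not part of the original file):
 * a driver that has to drop an offload after a configuration change can
 * clear the bit from hw_features and let the core re-evaluate dev->features
 * and send the notification.  Must be called with the RTNL lock held.
 *
 *	dev->hw_features &= ~NETIF_F_HW_CSUM;
 *	netdev_update_features(dev);
 */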
7380 
7381 /**
7382  *	netdev_change_features - recalculate device features
7383  *	@dev: the device to check
7384  *
7385  *	Recalculate dev->features set and send notifications even
7386  *	if they have not changed. Should be called instead of
7387  *	netdev_update_features() if also dev->vlan_features might
7388  *	have changed to allow the changes to be propagated to stacked
7389  *	VLAN devices.
7390  */
7391 void netdev_change_features(struct net_device *dev)
7392 {
7393 	__netdev_update_features(dev);
7394 	netdev_features_change(dev);
7395 }
7396 EXPORT_SYMBOL(netdev_change_features);
7397 
7398 /**
7399  *	netif_stacked_transfer_operstate -	transfer operstate
7400  *	@rootdev: the root or lower level device to transfer state from
7401  *	@dev: the device to transfer operstate to
7402  *
7403  *	Transfer operational state from root to device. This is normally
7404  *	called when a stacking relationship exists between the root
7405  *	device and the device(a leaf device).
7406  */
7407 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7408 					struct net_device *dev)
7409 {
7410 	if (rootdev->operstate == IF_OPER_DORMANT)
7411 		netif_dormant_on(dev);
7412 	else
7413 		netif_dormant_off(dev);
7414 
7415 	if (netif_carrier_ok(rootdev))
7416 		netif_carrier_on(dev);
7417 	else
7418 		netif_carrier_off(dev);
7419 }
7420 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
7421 
7422 #ifdef CONFIG_SYSFS
7423 static int netif_alloc_rx_queues(struct net_device *dev)
7424 {
7425 	unsigned int i, count = dev->num_rx_queues;
7426 	struct netdev_rx_queue *rx;
7427 	size_t sz = count * sizeof(*rx);
7428 
7429 	BUG_ON(count < 1);
7430 
7431 	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
7432 	if (!rx)
7433 		return -ENOMEM;
7434 
7435 	dev->_rx = rx;
7436 
7437 	for (i = 0; i < count; i++)
7438 		rx[i].dev = dev;
7439 	return 0;
7440 }
7441 #endif
7442 
7443 static void netdev_init_one_queue(struct net_device *dev,
7444 				  struct netdev_queue *queue, void *_unused)
7445 {
7446 	/* Initialize queue lock */
7447 	spin_lock_init(&queue->_xmit_lock);
7448 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7449 	queue->xmit_lock_owner = -1;
7450 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
7451 	queue->dev = dev;
7452 #ifdef CONFIG_BQL
7453 	dql_init(&queue->dql, HZ);
7454 #endif
7455 }
7456 
7457 static void netif_free_tx_queues(struct net_device *dev)
7458 {
7459 	kvfree(dev->_tx);
7460 }
7461 
7462 static int netif_alloc_netdev_queues(struct net_device *dev)
7463 {
7464 	unsigned int count = dev->num_tx_queues;
7465 	struct netdev_queue *tx;
7466 	size_t sz = count * sizeof(*tx);
7467 
7468 	if (count < 1 || count > 0xffff)
7469 		return -EINVAL;
7470 
7471 	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
7472 	if (!tx)
7473 		return -ENOMEM;
7474 
7475 	dev->_tx = tx;
7476 
7477 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7478 	spin_lock_init(&dev->tx_global_lock);
7479 
7480 	return 0;
7481 }
7482 
7483 void netif_tx_stop_all_queues(struct net_device *dev)
7484 {
7485 	unsigned int i;
7486 
7487 	for (i = 0; i < dev->num_tx_queues; i++) {
7488 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
7489 
7490 		netif_tx_stop_queue(txq);
7491 	}
7492 }
7493 EXPORT_SYMBOL(netif_tx_stop_all_queues);
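
/* Example (editor's illustrative sketch, not part of the original file):
 * a driver's close path typically stops every transmit queue before
 * tearing down its rings.  "example_ndo_stop" is a hypothetical name.
 *
 *	static int example_ndo_stop(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);
 *		... disable interrupts, free rings ...
 *		return 0;
 *	}
 */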
7494 
7495 /**
7496  *	register_netdevice	- register a network device
7497  *	@dev: device to register
7498  *
7499  *	Take a completed network device structure and add it to the kernel
7500  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7501  *	chain. 0 is returned on success. A negative errno code is returned
7502  *	on a failure to set up the device, or if the name is a duplicate.
7503  *
7504  *	Callers must hold the rtnl semaphore. You may want
7505  *	register_netdev() instead of this.
7506  *
7507  *	BUGS:
7508  *	The locking appears insufficient to guarantee two parallel registers
7509  *	will not get the same name.
7510  */
7511 
7512 int register_netdevice(struct net_device *dev)
7513 {
7514 	int ret;
7515 	struct net *net = dev_net(dev);
7516 
7517 	BUG_ON(dev_boot_phase);
7518 	ASSERT_RTNL();
7519 
7520 	might_sleep();
7521 
7522 	/* When net_device structures are persistent, this will be fatal. */
7523 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
7524 	BUG_ON(!net);
7525 
7526 	spin_lock_init(&dev->addr_list_lock);
7527 	netdev_set_addr_lockdep_class(dev);
7528 
7529 	ret = dev_get_valid_name(net, dev, dev->name);
7530 	if (ret < 0)
7531 		goto out;
7532 
7533 	/* Init, if this function is available */
7534 	if (dev->netdev_ops->ndo_init) {
7535 		ret = dev->netdev_ops->ndo_init(dev);
7536 		if (ret) {
7537 			if (ret > 0)
7538 				ret = -EIO;
7539 			goto out;
7540 		}
7541 	}
7542 
7543 	if (((dev->hw_features | dev->features) &
7544 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
7545 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7546 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7547 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7548 		ret = -EINVAL;
7549 		goto err_uninit;
7550 	}
7551 
7552 	ret = -EBUSY;
7553 	if (!dev->ifindex)
7554 		dev->ifindex = dev_new_index(net);
7555 	else if (__dev_get_by_index(net, dev->ifindex))
7556 		goto err_uninit;
7557 
7558 	/* Transfer changeable features to wanted_features and enable
7559 	 * software offloads (GSO and GRO).
7560 	 */
7561 	dev->hw_features |= NETIF_F_SOFT_FEATURES;
7562 	dev->features |= NETIF_F_SOFT_FEATURES;
7563 
7564 	if (dev->netdev_ops->ndo_udp_tunnel_add) {
7565 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7566 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7567 	}
7568 
7569 	dev->wanted_features = dev->features & dev->hw_features;
7570 
7571 	if (!(dev->flags & IFF_LOOPBACK))
7572 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
7573 
7574 	/* If IPv4 TCP segmentation offload is supported we should also
7575 	 * allow the device to enable segmenting the frame with the option
7576 	 * of ignoring a static IP ID value.  This doesn't enable the
7577 	 * feature itself but allows the user to enable it later.
7578 	 */
7579 	if (dev->hw_features & NETIF_F_TSO)
7580 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
7581 	if (dev->vlan_features & NETIF_F_TSO)
7582 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7583 	if (dev->mpls_features & NETIF_F_TSO)
7584 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7585 	if (dev->hw_enc_features & NETIF_F_TSO)
7586 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
7587 
7588 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
7589 	 */
7590 	dev->vlan_features |= NETIF_F_HIGHDMA;
7591 
7592 	/* Make NETIF_F_SG inheritable to tunnel devices.
7593 	 */
7594 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
7595 
7596 	/* Make NETIF_F_SG inheritable to MPLS.
7597 	 */
7598 	dev->mpls_features |= NETIF_F_SG;
7599 
7600 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7601 	ret = notifier_to_errno(ret);
7602 	if (ret)
7603 		goto err_uninit;
7604 
7605 	ret = netdev_register_kobject(dev);
7606 	if (ret)
7607 		goto err_uninit;
7608 	dev->reg_state = NETREG_REGISTERED;
7609 
7610 	__netdev_update_features(dev);
7611 
7612 	/*
7613 	 *	Default initial state at registration is that the
7614 	 *	device is present.
7615 	 */
7616 
7617 	set_bit(__LINK_STATE_PRESENT, &dev->state);
7618 
7619 	linkwatch_init_dev(dev);
7620 
7621 	dev_init_scheduler(dev);
7622 	dev_hold(dev);
7623 	list_netdevice(dev);
7624 	add_device_randomness(dev->dev_addr, dev->addr_len);
7625 
7626 	/* If the device has a permanent device address, the driver should
7627 	 * set dev_addr and also addr_assign_type should be set to
7628 	 * NET_ADDR_PERM (default value).
7629 	 */
7630 	if (dev->addr_assign_type == NET_ADDR_PERM)
7631 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7632 
7633 	/* Notify protocols that a new device appeared. */
7634 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
7635 	ret = notifier_to_errno(ret);
7636 	if (ret) {
7637 		rollback_registered(dev);
7638 		dev->reg_state = NETREG_UNREGISTERED;
7639 	}
7640 	/*
7641 	 *	Prevent userspace races by waiting until the network
7642 	 *	device is fully set up before sending notifications.
7643 	 */
7644 	if (!dev->rtnl_link_ops ||
7645 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7646 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7647 
7648 out:
7649 	return ret;
7650 
7651 err_uninit:
7652 	if (dev->netdev_ops->ndo_uninit)
7653 		dev->netdev_ops->ndo_uninit(dev);
7654 	if (dev->priv_destructor)
7655 		dev->priv_destructor(dev);
7656 	goto out;
7657 }
7658 EXPORT_SYMBOL(register_netdevice);
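
/*
 * Editor's sketch (assumption, not from the original source): callers of
 * register_netdevice() already hold the RTNL lock, for example inside an
 * rtnl_link_ops ->newlink() handler, so no extra locking is taken here:
 *
 *	ASSERT_RTNL();
 *	err = register_netdevice(dev);
 *	if (err < 0)
 *		return err;
 */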
7659 
7660 /**
7661  *	init_dummy_netdev	- init a dummy network device for NAPI
7662  *	@dev: device to init
7663  *
7664  *	This takes a network device structure and initializes the minimum
7665  *	number of fields so it can be used to schedule NAPI polls without
7666  *	registering a full-blown interface. This is to be used by drivers
7667  *	that need to tie several hardware interfaces to a single NAPI
7668  *	poll scheduler due to HW limitations.
7669  */
7670 int init_dummy_netdev(struct net_device *dev)
7671 {
7672 	/* Clear everything. Note we don't initialize spinlocks
7673 	 * as they aren't supposed to be taken by any of the
7674 	 * NAPI code and this dummy netdev is supposed to be
7675 	 * only ever used for NAPI polls.
7676 	 */
7677 	memset(dev, 0, sizeof(struct net_device));
7678 
7679 	/* make sure we BUG if trying to hit standard
7680 	 * register/unregister code path
7681 	 */
7682 	dev->reg_state = NETREG_DUMMY;
7683 
7684 	/* NAPI wants this */
7685 	INIT_LIST_HEAD(&dev->napi_list);
7686 
7687 	/* a dummy interface is started by default */
7688 	set_bit(__LINK_STATE_PRESENT, &dev->state);
7689 	set_bit(__LINK_STATE_START, &dev->state);
7690 
7691 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
7692 	 * because users of this 'device' don't need to change
7693 	 * its refcount.
7694 	 */
7695 
7696 	return 0;
7697 }
7698 EXPORT_SYMBOL_GPL(init_dummy_netdev);
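
/*
 * Editor's sketch (hypothetical driver, not part of the original source):
 * a driver with several hardware channels can hang its NAPI context off a
 * single dummy netdev.  "foo_priv", "dummy_dev" and foo_poll() are made-up
 * names.
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, foo_poll, 64);
 *	napi_enable(&priv->napi);
 */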
7699 
7700 
7701 /**
7702  *	register_netdev	- register a network device
7703  *	@dev: device to register
7704  *
7705  *	Take a completed network device structure and add it to the kernel
7706  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7707  *	chain. 0 is returned on success. A negative errno code is returned
7708  *	on a failure to set up the device, or if the name is a duplicate.
7709  *
7710  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
7711  *	and expands the device name if you passed a format string to
7712  *	alloc_netdev.
7713  */
7714 int register_netdev(struct net_device *dev)
7715 {
7716 	int err;
7717 
7718 	rtnl_lock();
7719 	err = register_netdevice(dev);
7720 	rtnl_unlock();
7721 	return err;
7722 }
7723 EXPORT_SYMBOL(register_netdev);
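
/*
 * Editor's sketch (hypothetical "foo" driver, not from the original
 * source): the usual probe/remove pairing around register_netdev().
 * free_netdev() is safe on the error path because the device never
 * reached NETREG_REGISTERED.
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */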
7724 
7725 int netdev_refcnt_read(const struct net_device *dev)
7726 {
7727 	int i, refcnt = 0;
7728 
7729 	for_each_possible_cpu(i)
7730 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7731 	return refcnt;
7732 }
7733 EXPORT_SYMBOL(netdev_refcnt_read);
7734 
7735 /**
7736  * netdev_wait_allrefs - wait until all references are gone.
7737  * @dev: target net_device
7738  *
7739  * This is called when unregistering network devices.
7740  *
7741  * Any protocol or device that holds a reference should register
7742  * for netdevice notification, and cleanup and put back the
7743  * reference if they receive an UNREGISTER event.
7744  * We can get stuck here if buggy protocols don't correctly
7745  * call dev_put.
7746  */
7747 static void netdev_wait_allrefs(struct net_device *dev)
7748 {
7749 	unsigned long rebroadcast_time, warning_time;
7750 	int refcnt;
7751 
7752 	linkwatch_forget_dev(dev);
7753 
7754 	rebroadcast_time = warning_time = jiffies;
7755 	refcnt = netdev_refcnt_read(dev);
7756 
7757 	while (refcnt != 0) {
7758 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
7759 			rtnl_lock();
7760 
7761 			/* Rebroadcast unregister notification */
7762 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7763 
7764 			__rtnl_unlock();
7765 			rcu_barrier();
7766 			rtnl_lock();
7767 
7768 			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7769 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7770 				     &dev->state)) {
7771 				/* We must not have linkwatch events
7772 				 * pending on unregister. If this
7773 				 * happens, we simply run the queue
7774 				 * unscheduled, resulting in a noop
7775 				 * for this device.
7776 				 */
7777 				linkwatch_run_queue();
7778 			}
7779 
7780 			__rtnl_unlock();
7781 
7782 			rebroadcast_time = jiffies;
7783 		}
7784 
7785 		msleep(250);
7786 
7787 		refcnt = netdev_refcnt_read(dev);
7788 
7789 		if (time_after(jiffies, warning_time + 10 * HZ)) {
7790 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7791 				 dev->name, refcnt);
7792 			warning_time = jiffies;
7793 		}
7794 	}
7795 }
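
/*
 * Editor's sketch (assumption, not from the original source): the notifier
 * pattern described above netdev_wait_allrefs() - a subsystem that holds a
 * long-lived reference drops it when it sees NETDEV_UNREGISTER.  The names
 * foo_netdev_event() and foo_lookup_state() are hypothetical.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UNREGISTER && foo_lookup_state(dev))
 *			dev_put(dev);
 *		return NOTIFY_DONE;
 *	}
 */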
7796 
7797 /* The sequence is:
7798  *
7799  *	rtnl_lock();
7800  *	...
7801  *	register_netdevice(x1);
7802  *	register_netdevice(x2);
7803  *	...
7804  *	unregister_netdevice(y1);
7805  *	unregister_netdevice(y2);
7806  *      ...
7807  *	rtnl_unlock();
7808  *	free_netdev(y1);
7809  *	free_netdev(y2);
7810  *
7811  * We are invoked by rtnl_unlock().
7812  * This allows us to deal with problems:
7813  * 1) We can delete sysfs objects which invoke hotplug
7814  *    without deadlocking with linkwatch via keventd.
7815  * 2) Since we run with the RTNL semaphore not held, we can sleep
7816  *    safely in order to wait for the netdev refcnt to drop to zero.
7817  *
7818  * We must not return until all unregister events added during
7819  * the interval the lock was held have been completed.
7820  */
7821 void netdev_run_todo(void)
7822 {
7823 	struct list_head list;
7824 
7825 	/* Snapshot list, allow later requests */
7826 	list_replace_init(&net_todo_list, &list);
7827 
7828 	__rtnl_unlock();
7829 
7830 
7831 	/* Wait for rcu callbacks to finish before next phase */
7832 	if (!list_empty(&list))
7833 		rcu_barrier();
7834 
7835 	while (!list_empty(&list)) {
7836 		struct net_device *dev
7837 			= list_first_entry(&list, struct net_device, todo_list);
7838 		list_del(&dev->todo_list);
7839 
7840 		rtnl_lock();
7841 		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7842 		__rtnl_unlock();
7843 
7844 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7845 			pr_err("network todo '%s' but state %d\n",
7846 			       dev->name, dev->reg_state);
7847 			dump_stack();
7848 			continue;
7849 		}
7850 
7851 		dev->reg_state = NETREG_UNREGISTERED;
7852 
7853 		netdev_wait_allrefs(dev);
7854 
7855 		/* paranoia */
7856 		BUG_ON(netdev_refcnt_read(dev));
7857 		BUG_ON(!list_empty(&dev->ptype_all));
7858 		BUG_ON(!list_empty(&dev->ptype_specific));
7859 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
7860 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
7861 		WARN_ON(dev->dn_ptr);
7862 
7863 		if (dev->priv_destructor)
7864 			dev->priv_destructor(dev);
7865 		if (dev->needs_free_netdev)
7866 			free_netdev(dev);
7867 
7868 		/* Report a network device has been unregistered */
7869 		rtnl_lock();
7870 		dev_net(dev)->dev_unreg_count--;
7871 		__rtnl_unlock();
7872 		wake_up(&netdev_unregistering_wq);
7873 
7874 		/* Free network device */
7875 		kobject_put(&dev->dev.kobj);
7876 	}
7877 }
7878 
7879 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7880  * all the same fields in the same order as net_device_stats, with only
7881  * the type differing, but rtnl_link_stats64 may have additional fields
7882  * at the end for newer counters.
7883  */
7884 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7885 			     const struct net_device_stats *netdev_stats)
7886 {
7887 #if BITS_PER_LONG == 64
7888 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
7889 	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
7890 	/* zero out counters that only exist in rtnl_link_stats64 */
7891 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
7892 	       sizeof(*stats64) - sizeof(*netdev_stats));
7893 #else
7894 	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
7895 	const unsigned long *src = (const unsigned long *)netdev_stats;
7896 	u64 *dst = (u64 *)stats64;
7897 
7898 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
7899 	for (i = 0; i < n; i++)
7900 		dst[i] = src[i];
7901 	/* zero out counters that only exist in rtnl_link_stats64 */
7902 	memset((char *)stats64 + n * sizeof(u64), 0,
7903 	       sizeof(*stats64) - n * sizeof(u64));
7904 #endif
7905 }
7906 EXPORT_SYMBOL(netdev_stats_to_stats64);
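
/*
 * Editor's sketch (hypothetical driver, not from the original source): a
 * driver's ndo_get_stats64() can reuse the helper above to copy the legacy
 * counters and then fix up any 64-bit fields it tracks itself.
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(stats, &dev->stats);
 *		stats->rx_bytes = priv->rx_bytes;	(hypothetical counter)
 *	}
 */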
7907 
7908 /**
7909  *	dev_get_stats	- get network device statistics
7910  *	@dev: device to get statistics from
7911  *	@storage: place to store stats
7912  *
7913  *	Get network statistics from device. Return @storage.
7914  *	The device driver may provide its own method by setting
7915  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7916  *	otherwise the internal statistics structure is used.
7917  */
7918 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7919 					struct rtnl_link_stats64 *storage)
7920 {
7921 	const struct net_device_ops *ops = dev->netdev_ops;
7922 
7923 	if (ops->ndo_get_stats64) {
7924 		memset(storage, 0, sizeof(*storage));
7925 		ops->ndo_get_stats64(dev, storage);
7926 	} else if (ops->ndo_get_stats) {
7927 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
7928 	} else {
7929 		netdev_stats_to_stats64(storage, &dev->stats);
7930 	}
7931 	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
7932 	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
7933 	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
7934 	return storage;
7935 }
7936 EXPORT_SYMBOL(dev_get_stats);
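
/*
 * Editor's sketch (assumption, not from the original source): reading the
 * aggregate counters from outside the driver.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: rx %llu tx %llu packets\n", dev->name,
 *		stats.rx_packets, stats.tx_packets);
 */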
7937 
7938 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
7939 {
7940 	struct netdev_queue *queue = dev_ingress_queue(dev);
7941 
7942 #ifdef CONFIG_NET_CLS_ACT
7943 	if (queue)
7944 		return queue;
7945 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7946 	if (!queue)
7947 		return NULL;
7948 	netdev_init_one_queue(dev, queue, NULL);
7949 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
7950 	queue->qdisc_sleeping = &noop_qdisc;
7951 	rcu_assign_pointer(dev->ingress_queue, queue);
7952 #endif
7953 	return queue;
7954 }
7955 
7956 static const struct ethtool_ops default_ethtool_ops;
7957 
7958 void netdev_set_default_ethtool_ops(struct net_device *dev,
7959 				    const struct ethtool_ops *ops)
7960 {
7961 	if (dev->ethtool_ops == &default_ethtool_ops)
7962 		dev->ethtool_ops = ops;
7963 }
7964 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7965 
7966 void netdev_freemem(struct net_device *dev)
7967 {
7968 	char *addr = (char *)dev - dev->padded;
7969 
7970 	kvfree(addr);
7971 }
7972 
7973 /**
7974  * alloc_netdev_mqs - allocate network device
7975  * @sizeof_priv: size of private data to allocate space for
7976  * @name: device name format string
7977  * @name_assign_type: origin of device name
7978  * @setup: callback to initialize device
7979  * @txqs: the number of TX subqueues to allocate
7980  * @rxqs: the number of RX subqueues to allocate
7981  *
7982  * Allocates a struct net_device with private data area for driver use
7983  * and performs basic initialization.  Also allocates subqueue structs
7984  * for each queue on the device.
7985  */
7986 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7987 		unsigned char name_assign_type,
7988 		void (*setup)(struct net_device *),
7989 		unsigned int txqs, unsigned int rxqs)
7990 {
7991 	struct net_device *dev;
7992 	size_t alloc_size;
7993 	struct net_device *p;
7994 
7995 	BUG_ON(strlen(name) >= sizeof(dev->name));
7996 
7997 	if (txqs < 1) {
7998 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
7999 		return NULL;
8000 	}
8001 
8002 #ifdef CONFIG_SYSFS
8003 	if (rxqs < 1) {
8004 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
8005 		return NULL;
8006 	}
8007 #endif
8008 
8009 	alloc_size = sizeof(struct net_device);
8010 	if (sizeof_priv) {
8011 		/* ensure 32-byte alignment of private area */
8012 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
8013 		alloc_size += sizeof_priv;
8014 	}
8015 	/* ensure 32-byte alignment of whole construct */
8016 	alloc_size += NETDEV_ALIGN - 1;
8017 
8018 	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8019 	if (!p)
8020 		return NULL;
8021 
8022 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
8023 	dev->padded = (char *)dev - (char *)p;
8024 
8025 	dev->pcpu_refcnt = alloc_percpu(int);
8026 	if (!dev->pcpu_refcnt)
8027 		goto free_dev;
8028 
8029 	if (dev_addr_init(dev))
8030 		goto free_pcpu;
8031 
8032 	dev_mc_init(dev);
8033 	dev_uc_init(dev);
8034 
8035 	dev_net_set(dev, &init_net);
8036 
8037 	dev->gso_max_size = GSO_MAX_SIZE;
8038 	dev->gso_max_segs = GSO_MAX_SEGS;
8039 
8040 	INIT_LIST_HEAD(&dev->napi_list);
8041 	INIT_LIST_HEAD(&dev->unreg_list);
8042 	INIT_LIST_HEAD(&dev->close_list);
8043 	INIT_LIST_HEAD(&dev->link_watch_list);
8044 	INIT_LIST_HEAD(&dev->adj_list.upper);
8045 	INIT_LIST_HEAD(&dev->adj_list.lower);
8046 	INIT_LIST_HEAD(&dev->ptype_all);
8047 	INIT_LIST_HEAD(&dev->ptype_specific);
8048 #ifdef CONFIG_NET_SCHED
8049 	hash_init(dev->qdisc_hash);
8050 #endif
8051 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8052 	setup(dev);
8053 
8054 	if (!dev->tx_queue_len) {
8055 		dev->priv_flags |= IFF_NO_QUEUE;
8056 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
8057 	}
8058 
8059 	dev->num_tx_queues = txqs;
8060 	dev->real_num_tx_queues = txqs;
8061 	if (netif_alloc_netdev_queues(dev))
8062 		goto free_all;
8063 
8064 #ifdef CONFIG_SYSFS
8065 	dev->num_rx_queues = rxqs;
8066 	dev->real_num_rx_queues = rxqs;
8067 	if (netif_alloc_rx_queues(dev))
8068 		goto free_all;
8069 #endif
8070 
8071 	strcpy(dev->name, name);
8072 	dev->name_assign_type = name_assign_type;
8073 	dev->group = INIT_NETDEV_GROUP;
8074 	if (!dev->ethtool_ops)
8075 		dev->ethtool_ops = &default_ethtool_ops;
8076 
8077 	nf_hook_ingress_init(dev);
8078 
8079 	return dev;
8080 
8081 free_all:
8082 	free_netdev(dev);
8083 	return NULL;
8084 
8085 free_pcpu:
8086 	free_percpu(dev->pcpu_refcnt);
8087 free_dev:
8088 	netdev_freemem(dev);
8089 	return NULL;
8090 }
8091 EXPORT_SYMBOL(alloc_netdev_mqs);
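
/*
 * Editor's sketch (hypothetical values, not from the original source):
 * allocating an Ethernet-style device with eight TX and eight RX queues.
 * The common single-queue case goes through the alloc_netdev() and
 * alloc_etherdev() wrappers built on top of this function.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */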
8092 
8093 /**
8094  * free_netdev - free network device
8095  * @dev: device
8096  *
8097  * This function does the last stage of destroying an allocated device
8098  * interface. The reference to the device object is released. If this
8099  * is the last reference then it will be freed. Must be called in process
8100  * context.
8101  */
8102 void free_netdev(struct net_device *dev)
8103 {
8104 	struct napi_struct *p, *n;
8105 	struct bpf_prog *prog;
8106 
8107 	might_sleep();
8108 	netif_free_tx_queues(dev);
8109 #ifdef CONFIG_SYSFS
8110 	kvfree(dev->_rx);
8111 #endif
8112 
8113 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
8114 
8115 	/* Flush device addresses */
8116 	dev_addr_flush(dev);
8117 
8118 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8119 		netif_napi_del(p);
8120 
8121 	free_percpu(dev->pcpu_refcnt);
8122 	dev->pcpu_refcnt = NULL;
8123 
8124 	prog = rcu_dereference_protected(dev->xdp_prog, 1);
8125 	if (prog) {
8126 		bpf_prog_put(prog);
8127 		static_key_slow_dec(&generic_xdp_needed);
8128 	}
8129 
8130 	/*  Compatibility with error handling in drivers */
8131 	if (dev->reg_state == NETREG_UNINITIALIZED) {
8132 		netdev_freemem(dev);
8133 		return;
8134 	}
8135 
8136 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8137 	dev->reg_state = NETREG_RELEASED;
8138 
8139 	/* will free via device release */
8140 	put_device(&dev->dev);
8141 }
8142 EXPORT_SYMBOL(free_netdev);
8143 
8144 /**
8145  *	synchronize_net -  Synchronize with packet receive processing
8146  *
8147  *	Wait for packets currently being received to be done.
8148  *	Does not block later packets from starting.
8149  */
8150 void synchronize_net(void)
8151 {
8152 	might_sleep();
8153 	if (rtnl_is_locked())
8154 		synchronize_rcu_expedited();
8155 	else
8156 		synchronize_rcu();
8157 }
8158 EXPORT_SYMBOL(synchronize_net);
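
/*
 * Editor's sketch (assumption, not from the original source): the typical
 * retire pattern this helper supports - unlink an RCU-protected object
 * from the receive paths, wait for in-flight receivers, then free it.
 * "old" and its list membership are hypothetical.
 *
 *	list_del_rcu(&old->list);
 *	synchronize_net();
 *	kfree(old);
 */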
8159 
8160 /**
8161  *	unregister_netdevice_queue - remove device from the kernel
8162  *	@dev: device
8163  *	@head: list
8164  *
8165  *	This function shuts down a device interface and removes it
8166  *	from the kernel tables.
8167  *	If head is not NULL, the device is queued to be unregistered later.
8168  *
8169  *	Callers must hold the rtnl semaphore.  You may want
8170  *	unregister_netdev() instead of this.
8171  */
8172 
8173 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
8174 {
8175 	ASSERT_RTNL();
8176 
8177 	if (head) {
8178 		list_move_tail(&dev->unreg_list, head);
8179 	} else {
8180 		rollback_registered(dev);
8181 		/* Finish processing unregister after unlock */
8182 		net_set_todo(dev);
8183 	}
8184 }
8185 EXPORT_SYMBOL(unregister_netdevice_queue);
8186 
8187 /**
8188  *	unregister_netdevice_many - unregister many devices
8189  *	@head: list of devices
8190  *
8191  *  Note: As most callers use a stack-allocated list_head,
8192  *  we force a list_del() to make sure the stack won't be corrupted later.
8193  */
8194 void unregister_netdevice_many(struct list_head *head)
8195 {
8196 	struct net_device *dev;
8197 
8198 	if (!list_empty(head)) {
8199 		rollback_registered_many(head);
8200 		list_for_each_entry(dev, head, unreg_list)
8201 			net_set_todo(dev);
8202 		list_del(head);
8203 	}
8204 }
8205 EXPORT_SYMBOL(unregister_netdevice_many);
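
/*
 * Editor's sketch (assumption, not from the original source): batching
 * several unregistrations in one RTNL section using a stack list, which is
 * why the list_del() mentioned above matters.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */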
8206 
8207 /**
8208  *	unregister_netdev - remove device from the kernel
8209  *	@dev: device
8210  *
8211  *	This function shuts down a device interface and removes it
8212  *	from the kernel tables.
8213  *
8214  *	This is just a wrapper for unregister_netdevice that takes
8215  *	the rtnl semaphore.  In general you want to use this and not
8216  *	unregister_netdevice.
8217  */
8218 void unregister_netdev(struct net_device *dev)
8219 {
8220 	rtnl_lock();
8221 	unregister_netdevice(dev);
8222 	rtnl_unlock();
8223 }
8224 EXPORT_SYMBOL(unregister_netdev);
8225 
8226 /**
8227  *	dev_change_net_namespace - move device to a different network namespace
8228  *	@dev: device
8229  *	@net: network namespace
8230  *	@pat: If not NULL name pattern to try if the current device name
8231  *	      is already taken in the destination network namespace.
8232  *
8233  *	This function shuts down a device interface and moves it
8234  *	to a new network namespace. On success 0 is returned, on
8235  *	a failure a negative errno code is returned.
8236  *
8237  *	Callers must hold the rtnl semaphore.
8238  */
8239 
8240 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8241 {
8242 	int err;
8243 
8244 	ASSERT_RTNL();
8245 
8246 	/* Don't allow namespace local devices to be moved. */
8247 	err = -EINVAL;
8248 	if (dev->features & NETIF_F_NETNS_LOCAL)
8249 		goto out;
8250 
8251 	/* Ensure the device has been registered */
8252 	if (dev->reg_state != NETREG_REGISTERED)
8253 		goto out;
8254 
8255 	/* Get out if there is nothing to do */
8256 	err = 0;
8257 	if (net_eq(dev_net(dev), net))
8258 		goto out;
8259 
8260 	/* Pick the destination device name, and ensure
8261 	 * we can use it in the destination network namespace.
8262 	 */
8263 	err = -EEXIST;
8264 	if (__dev_get_by_name(net, dev->name)) {
8265 		/* We get here if we can't use the current device name */
8266 		if (!pat)
8267 			goto out;
8268 		if (dev_get_valid_name(net, dev, pat) < 0)
8269 			goto out;
8270 	}
8271 
8272 	/*
8273 	 * And now a mini version of register_netdevice and unregister_netdevice.
8274 	 */
8275 
8276 	/* If the device is running, close it first. */
8277 	dev_close(dev);
8278 
8279 	/* And unlink it from device chain */
8280 	err = -ENODEV;
8281 	unlist_netdevice(dev);
8282 
8283 	synchronize_net();
8284 
8285 	/* Shutdown queueing discipline. */
8286 	dev_shutdown(dev);
8287 
8288 	/* Notify protocols that we are about to destroy
8289 	 * this device. They should clean all the things.
8290 	 *
8291 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
8292 	 * This is wanted because this way 8021q and macvlan know
8293 	 * the device is just moving and can keep their slaves up.
8294 	 */
8295 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8296 	rcu_barrier();
8297 	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
8298 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
8299 
8300 	/*
8301 	 *	Flush the unicast and multicast chains
8302 	 */
8303 	dev_uc_flush(dev);
8304 	dev_mc_flush(dev);
8305 
8306 	/* Send a netdev-removed uevent to the old namespace */
8307 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
8308 	netdev_adjacent_del_links(dev);
8309 
8310 	/* Actually switch the network namespace */
8311 	dev_net_set(dev, net);
8312 
8313 	/* If there is an ifindex conflict assign a new one */
8314 	if (__dev_get_by_index(net, dev->ifindex))
8315 		dev->ifindex = dev_new_index(net);
8316 
8317 	/* Send a netdev-add uevent to the new namespace */
8318 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
8319 	netdev_adjacent_add_links(dev);
8320 
8321 	/* Fixup kobjects */
8322 	err = device_rename(&dev->dev, dev->name);
8323 	WARN_ON(err);
8324 
8325 	/* Add the device back in the hashes */
8326 	list_netdevice(dev);
8327 
8328 	/* Notify protocols that a new device appeared. */
8329 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
8330 
8331 	/*
8332 	 *	Prevent userspace races by waiting until the network
8333 	 *	device is fully set up before sending notifications.
8334 	 */
8335 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8336 
8337 	synchronize_net();
8338 	err = 0;
8339 out:
8340 	return err;
8341 }
8342 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
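
/*
 * Editor's sketch (assumption, not from the original source): moving a
 * device into another namespace under RTNL, falling back to a "dev%d"
 * pattern if its current name is already taken there.  "target_net" is a
 * hypothetical struct net pointer.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "dev%d");
 *	rtnl_unlock();
 */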
8343 
8344 static int dev_cpu_dead(unsigned int oldcpu)
8345 {
8346 	struct sk_buff **list_skb;
8347 	struct sk_buff *skb;
8348 	unsigned int cpu;
8349 	struct softnet_data *sd, *oldsd, *remsd = NULL;
8350 
8351 	local_irq_disable();
8352 	cpu = smp_processor_id();
8353 	sd = &per_cpu(softnet_data, cpu);
8354 	oldsd = &per_cpu(softnet_data, oldcpu);
8355 
8356 	/* Find end of our completion_queue. */
8357 	list_skb = &sd->completion_queue;
8358 	while (*list_skb)
8359 		list_skb = &(*list_skb)->next;
8360 	/* Append completion queue from offline CPU. */
8361 	*list_skb = oldsd->completion_queue;
8362 	oldsd->completion_queue = NULL;
8363 
8364 	/* Append output queue from offline CPU. */
8365 	if (oldsd->output_queue) {
8366 		*sd->output_queue_tailp = oldsd->output_queue;
8367 		sd->output_queue_tailp = oldsd->output_queue_tailp;
8368 		oldsd->output_queue = NULL;
8369 		oldsd->output_queue_tailp = &oldsd->output_queue;
8370 	}
8371 	/* Append NAPI poll list from offline CPU, with one exception:
8372 	 * process_backlog() must be called by cpu owning percpu backlog.
8373 	 * We properly handle process_queue & input_pkt_queue later.
8374 	 */
8375 	while (!list_empty(&oldsd->poll_list)) {
8376 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8377 							    struct napi_struct,
8378 							    poll_list);
8379 
8380 		list_del_init(&napi->poll_list);
8381 		if (napi->poll == process_backlog)
8382 			napi->state = 0;
8383 		else
8384 			____napi_schedule(sd, napi);
8385 	}
8386 
8387 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
8388 	local_irq_enable();
8389 
8390 #ifdef CONFIG_RPS
8391 	remsd = oldsd->rps_ipi_list;
8392 	oldsd->rps_ipi_list = NULL;
8393 #endif
8394 	/* Send out pending IPIs on the offline CPU */
8395 	net_rps_send_ipi(remsd);
8396 
8397 	/* Process offline CPU's input_pkt_queue */
8398 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
8399 		netif_rx_ni(skb);
8400 		input_queue_head_incr(oldsd);
8401 	}
8402 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
8403 		netif_rx_ni(skb);
8404 		input_queue_head_incr(oldsd);
8405 	}
8406 
8407 	return 0;
8408 }
8409 
8410 /**
8411  *	netdev_increment_features - increment feature set by one
8412  *	@all: current feature set
8413  *	@one: new feature set
8414  *	@mask: mask feature set
8415  *
8416  *	Computes a new feature set after adding a device with feature set
8417  *	@one to the master device with current feature set @all.  Will not
8418  *	enable anything that is off in @mask. Returns the new feature set.
8419  */
8420 netdev_features_t netdev_increment_features(netdev_features_t all,
8421 	netdev_features_t one, netdev_features_t mask)
8422 {
8423 	if (mask & NETIF_F_HW_CSUM)
8424 		mask |= NETIF_F_CSUM_MASK;
8425 	mask |= NETIF_F_VLAN_CHALLENGED;
8426 
8427 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
8428 	all &= one | ~NETIF_F_ALL_FOR_ALL;
8429 
8430 	/* If one device supports hw checksumming, set for all. */
8431 	if (all & NETIF_F_HW_CSUM)
8432 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
8433 
8434 	return all;
8435 }
8436 EXPORT_SYMBOL(netdev_increment_features);
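
/*
 * Editor's sketch (assumption, not from the original source): how a master
 * device such as a bond or team might fold each slave's feature set into
 * its own using the helper above.  "mask" stands for the features the
 * master is willing to expose; the slave list is hypothetical.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &master_priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */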
8437 
8438 static struct hlist_head * __net_init netdev_create_hash(void)
8439 {
8440 	int i;
8441 	struct hlist_head *hash;
8442 
8443 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8444 	if (hash != NULL)
8445 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
8446 			INIT_HLIST_HEAD(&hash[i]);
8447 
8448 	return hash;
8449 }
8450 
8451 /* Initialize per network namespace state */
8452 static int __net_init netdev_init(struct net *net)
8453 {
8454 	if (net != &init_net)
8455 		INIT_LIST_HEAD(&net->dev_base_head);
8456 
8457 	net->dev_name_head = netdev_create_hash();
8458 	if (net->dev_name_head == NULL)
8459 		goto err_name;
8460 
8461 	net->dev_index_head = netdev_create_hash();
8462 	if (net->dev_index_head == NULL)
8463 		goto err_idx;
8464 
8465 	return 0;
8466 
8467 err_idx:
8468 	kfree(net->dev_name_head);
8469 err_name:
8470 	return -ENOMEM;
8471 }
8472 
8473 /**
8474  *	netdev_drivername - network driver for the device
8475  *	@dev: network device
8476  *
8477  *	Determine network driver for device.
8478  */
8479 const char *netdev_drivername(const struct net_device *dev)
8480 {
8481 	const struct device_driver *driver;
8482 	const struct device *parent;
8483 	const char *empty = "";
8484 
8485 	parent = dev->dev.parent;
8486 	if (!parent)
8487 		return empty;
8488 
8489 	driver = parent->driver;
8490 	if (driver && driver->name)
8491 		return driver->name;
8492 	return empty;
8493 }
8494 
8495 static void __netdev_printk(const char *level, const struct net_device *dev,
8496 			    struct va_format *vaf)
8497 {
8498 	if (dev && dev->dev.parent) {
8499 		dev_printk_emit(level[1] - '0',
8500 				dev->dev.parent,
8501 				"%s %s %s%s: %pV",
8502 				dev_driver_string(dev->dev.parent),
8503 				dev_name(dev->dev.parent),
8504 				netdev_name(dev), netdev_reg_state(dev),
8505 				vaf);
8506 	} else if (dev) {
8507 		printk("%s%s%s: %pV",
8508 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
8509 	} else {
8510 		printk("%s(NULL net_device): %pV", level, vaf);
8511 	}
8512 }
8513 
8514 void netdev_printk(const char *level, const struct net_device *dev,
8515 		   const char *format, ...)
8516 {
8517 	struct va_format vaf;
8518 	va_list args;
8519 
8520 	va_start(args, format);
8521 
8522 	vaf.fmt = format;
8523 	vaf.va = &args;
8524 
8525 	__netdev_printk(level, dev, &vaf);
8526 
8527 	va_end(args);
8528 }
8529 EXPORT_SYMBOL(netdev_printk);
8530 
8531 #define define_netdev_printk_level(func, level)			\
8532 void func(const struct net_device *dev, const char *fmt, ...)	\
8533 {								\
8534 	struct va_format vaf;					\
8535 	va_list args;						\
8536 								\
8537 	va_start(args, fmt);					\
8538 								\
8539 	vaf.fmt = fmt;						\
8540 	vaf.va = &args;						\
8541 								\
8542 	__netdev_printk(level, dev, &vaf);			\
8543 								\
8544 	va_end(args);						\
8545 }								\
8546 EXPORT_SYMBOL(func);
8547 
8548 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8549 define_netdev_printk_level(netdev_alert, KERN_ALERT);
8550 define_netdev_printk_level(netdev_crit, KERN_CRIT);
8551 define_netdev_printk_level(netdev_err, KERN_ERR);
8552 define_netdev_printk_level(netdev_warn, KERN_WARNING);
8553 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8554 define_netdev_printk_level(netdev_info, KERN_INFO);
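
/*
 * Editor's usage sketch (not part of the original source): the per-level
 * wrappers generated above behave like dev_err()/dev_info() but prefix the
 * message with the driver, bus and interface names.  "txq" and "speed" are
 * hypothetical variables.
 *
 *	netdev_err(dev, "TX timeout on queue %u\n", txq);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */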
8555 
8556 static void __net_exit netdev_exit(struct net *net)
8557 {
8558 	kfree(net->dev_name_head);
8559 	kfree(net->dev_index_head);
8560 }
8561 
8562 static struct pernet_operations __net_initdata netdev_net_ops = {
8563 	.init = netdev_init,
8564 	.exit = netdev_exit,
8565 };
8566 
8567 static void __net_exit default_device_exit(struct net *net)
8568 {
8569 	struct net_device *dev, *aux;
8570 	/*
8571 	 * Push all migratable network devices back to the
8572 	 * initial network namespace
8573 	 */
8574 	rtnl_lock();
8575 	for_each_netdev_safe(net, dev, aux) {
8576 		int err;
8577 		char fb_name[IFNAMSIZ];
8578 
8579 		/* Ignore unmovable devices (e.g. loopback) */
8580 		if (dev->features & NETIF_F_NETNS_LOCAL)
8581 			continue;
8582 
8583 		/* Leave virtual devices for the generic cleanup */
8584 		if (dev->rtnl_link_ops)
8585 			continue;
8586 
8587 		/* Push remaining network devices to init_net */
8588 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8589 		err = dev_change_net_namespace(dev, &init_net, fb_name);
8590 		if (err) {
8591 			pr_emerg("%s: failed to move %s to init_net: %d\n",
8592 				 __func__, dev->name, err);
8593 			BUG();
8594 		}
8595 	}
8596 	rtnl_unlock();
8597 }
8598 
8599 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8600 {
8601 	/* Return with the rtnl_lock held when there are no network
8602 	 * devices unregistering in any network namespace in net_list.
8603 	 */
8604 	struct net *net;
8605 	bool unregistering;
8606 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
8607 
8608 	add_wait_queue(&netdev_unregistering_wq, &wait);
8609 	for (;;) {
8610 		unregistering = false;
8611 		rtnl_lock();
8612 		list_for_each_entry(net, net_list, exit_list) {
8613 			if (net->dev_unreg_count > 0) {
8614 				unregistering = true;
8615 				break;
8616 			}
8617 		}
8618 		if (!unregistering)
8619 			break;
8620 		__rtnl_unlock();
8621 
8622 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
8623 	}
8624 	remove_wait_queue(&netdev_unregistering_wq, &wait);
8625 }
8626 
8627 static void __net_exit default_device_exit_batch(struct list_head *net_list)
8628 {
8629 	/* At exit, all network devices must be removed from a network
8630 	 * namespace.  Do this in the reverse order of registration.
8631 	 * Do this across as many network namespaces as possible to
8632 	 * improve batching efficiency.
8633 	 */
8634 	struct net_device *dev;
8635 	struct net *net;
8636 	LIST_HEAD(dev_kill_list);
8637 
8638 	/* To prevent network device cleanup code from dereferencing
8639 	 * loopback devices or network devices that have been freed,
8640 	 * wait here for all pending unregistrations to complete
8641 	 * before unregistering the loopback device and allowing the
8642 	 * network namespace to be freed.
8643 	 *
8644 	 * The netdev todo list containing all network device
8645 	 * unregistrations that happen in default_device_exit_batch
8646 	 * will run in the rtnl_unlock() at the end of
8647 	 * default_device_exit_batch.
8648 	 */
8649 	rtnl_lock_unregistering(net_list);
8650 	list_for_each_entry(net, net_list, exit_list) {
8651 		for_each_netdev_reverse(net, dev) {
8652 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
8653 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8654 			else
8655 				unregister_netdevice_queue(dev, &dev_kill_list);
8656 		}
8657 	}
8658 	unregister_netdevice_many(&dev_kill_list);
8659 	rtnl_unlock();
8660 }
8661 
8662 static struct pernet_operations __net_initdata default_device_ops = {
8663 	.exit = default_device_exit,
8664 	.exit_batch = default_device_exit_batch,
8665 };
8666 
8667 /*
8668  *	Initialize the DEV module. At boot time this walks the device list and
8669  *	unhooks any devices that fail to initialise (normally hardware not
8670  *	present) and leaves us with a valid list of present and active devices.
8671  *
8672  */
8673 
8674 /*
8675  *       This is called single-threaded during boot, so no need
8676  *       to take the rtnl semaphore.
8677  */
8678 static int __init net_dev_init(void)
8679 {
8680 	int i, rc = -ENOMEM;
8681 
8682 	BUG_ON(!dev_boot_phase);
8683 
8684 	if (dev_proc_init())
8685 		goto out;
8686 
8687 	if (netdev_kobject_init())
8688 		goto out;
8689 
8690 	INIT_LIST_HEAD(&ptype_all);
8691 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
8692 		INIT_LIST_HEAD(&ptype_base[i]);
8693 
8694 	INIT_LIST_HEAD(&offload_base);
8695 
8696 	if (register_pernet_subsys(&netdev_net_ops))
8697 		goto out;
8698 
8699 	/*
8700 	 *	Initialise the packet receive queues.
8701 	 */
8702 
8703 	for_each_possible_cpu(i) {
8704 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
8705 		struct softnet_data *sd = &per_cpu(softnet_data, i);
8706 
8707 		INIT_WORK(flush, flush_backlog);
8708 
8709 		skb_queue_head_init(&sd->input_pkt_queue);
8710 		skb_queue_head_init(&sd->process_queue);
8711 		INIT_LIST_HEAD(&sd->poll_list);
8712 		sd->output_queue_tailp = &sd->output_queue;
8713 #ifdef CONFIG_RPS
8714 		sd->csd.func = rps_trigger_softirq;
8715 		sd->csd.info = sd;
8716 		sd->cpu = i;
8717 #endif
8718 
8719 		sd->backlog.poll = process_backlog;
8720 		sd->backlog.weight = weight_p;
8721 	}
8722 
8723 	dev_boot_phase = 0;
8724 
8725 	/* The loopback device is special: if any other network device
8726 	 * is present in a network namespace, the loopback device must
8727 	 * be present. Since we now dynamically allocate and free the
8728 	 * loopback device, ensure this invariant is maintained by
8729 	 * keeping the loopback device as the first device on the
8730 	 * list of network devices, ensuring the loopback device
8731 	 * is the first device that appears and the last network device
8732 	 * that disappears.
8733 	 */
8734 	if (register_pernet_device(&loopback_net_ops))
8735 		goto out;
8736 
8737 	if (register_pernet_device(&default_device_ops))
8738 		goto out;
8739 
8740 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8741 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
8742 
8743 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
8744 				       NULL, dev_cpu_dead);
8745 	WARN_ON(rc < 0);
8746 	rc = 0;
8747 out:
8748 	return rc;
8749 }
8750 
8751 subsys_initcall(net_dev_init);
8752