xref: /openbmc/linux/net/core/sock.c (revision 179dd8c0)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly,
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93 
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
120 
121 #include <asm/uaccess.h>
122 
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 #include <linux/sock_diag.h>
135 
136 #include <linux/filter.h>
137 
138 #include <trace/events/sock.h>
139 
140 #ifdef CONFIG_INET
141 #include <net/tcp.h>
142 #endif
143 
144 #include <net/busy_poll.h>
145 
146 static DEFINE_MUTEX(proto_list_mutex);
147 static LIST_HEAD(proto_list);
148 
149 /**
150  * sk_ns_capable - General socket capability test
151  * @sk: Socket to use a capability on or through
152  * @user_ns: The user namespace of the capability to use
153  * @cap: The capability to use
154  *
155  * Test to see if the opener of the socket had the capability @cap in the
156  * user namespace @user_ns when the socket was created and whether the
157  * current process has it as well.
158  */
159 bool sk_ns_capable(const struct sock *sk,
160 		   struct user_namespace *user_ns, int cap)
161 {
162 	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
163 		ns_capable(user_ns, cap);
164 }
165 EXPORT_SYMBOL(sk_ns_capable);
166 
167 /**
168  * sk_capable - Socket global capability test
169  * @sk: Socket to use a capability on or through
170  * @cap: The global capability to use
171  *
172  * Test to see if the opener of the socket had the capability @cap when
173  * the socket was created and whether the current process has it in all
174  * user namespaces.
175  */
176 bool sk_capable(const struct sock *sk, int cap)
177 {
178 	return sk_ns_capable(sk, &init_user_ns, cap);
179 }
180 EXPORT_SYMBOL(sk_capable);
181 
182 /**
183  * sk_net_capable - Network namespace socket capability test
184  * @sk: Socket to use a capability on or through
185  * @cap: The capability to use
186  *
187  * Test to see if the opener of the socket had the capability @cap over the
188  * network namespace the socket is a member of when the socket was created
189  * and whether the current process has it as well.
190  */
191 bool sk_net_capable(const struct sock *sk, int cap)
192 {
193 	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
194 }
195 EXPORT_SYMBOL(sk_net_capable);
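
/*
 * Example (editorial sketch, not part of the original file): a protocol's
 * setsockopt() handler could use the helpers above to gate a privileged
 * option on both the socket opener's and the caller's capabilities:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * This refuses the option unless the capability was held both when the
 * socket was opened and by the process making the call now.
 */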
196 
197 
198 #ifdef CONFIG_MEMCG_KMEM
199 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
200 {
201 	struct proto *proto;
202 	int ret = 0;
203 
204 	mutex_lock(&proto_list_mutex);
205 	list_for_each_entry(proto, &proto_list, node) {
206 		if (proto->init_cgroup) {
207 			ret = proto->init_cgroup(memcg, ss);
208 			if (ret)
209 				goto out;
210 		}
211 	}
212 
213 	mutex_unlock(&proto_list_mutex);
214 	return ret;
215 out:
216 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
217 		if (proto->destroy_cgroup)
218 			proto->destroy_cgroup(memcg);
219 	mutex_unlock(&proto_list_mutex);
220 	return ret;
221 }
222 
223 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
224 {
225 	struct proto *proto;
226 
227 	mutex_lock(&proto_list_mutex);
228 	list_for_each_entry_reverse(proto, &proto_list, node)
229 		if (proto->destroy_cgroup)
230 			proto->destroy_cgroup(memcg);
231 	mutex_unlock(&proto_list_mutex);
232 }
233 #endif
234 
235 /*
236  * Each address family might have different locking rules, so we have
237  * one slock key per address family:
238  */
239 static struct lock_class_key af_family_keys[AF_MAX];
240 static struct lock_class_key af_family_slock_keys[AF_MAX];
241 
242 #if defined(CONFIG_MEMCG_KMEM)
243 struct static_key memcg_socket_limit_enabled;
244 EXPORT_SYMBOL(memcg_socket_limit_enabled);
245 #endif
246 
247 /*
248  * Make lock validator output more readable. (We pre-construct these
249  * strings at build time, so that runtime initialization of socket
250  * locks is fast):
251  */
252 static const char *const af_family_key_strings[AF_MAX+1] = {
253   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
254   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
255   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
256   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
257   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
258   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
259   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
260   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
261   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
262   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
263   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
264   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
265   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
266   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
267 };
268 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
269   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
270   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
271   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
272   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
273   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
274   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
275   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
276   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
277   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
278   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
279   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
280   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
281   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
282   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
283 };
284 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
285   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
286   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
287   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
288   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
289   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
290   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
291   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
292   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
293   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
294   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
295   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
296   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
297   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
298   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
299 };
300 
301 /*
302  * sk_callback_lock locking rules are per-address-family,
303  * so split the lock classes by using a per-AF key:
304  */
305 static struct lock_class_key af_callback_keys[AF_MAX];
306 
307 /* Take into consideration the size of the struct sk_buff overhead in the
308  * determination of these values, since that is non-constant across
309  * platforms.  This makes socket queueing behavior and performance
310  * not depend upon such differences.
311  */
312 #define _SK_MEM_PACKETS		256
313 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
314 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
315 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
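
/*
 * Worked example (editorial note): on a typical 64-bit build,
 * SKB_TRUESIZE(256) comes to roughly 832 bytes once the cache-aligned
 * struct sk_buff and struct skb_shared_info are added to the 256 bytes
 * of payload, so SK_WMEM_MAX and SK_RMEM_MAX default to about
 * 832 * 256 = 212992 bytes (the familiar [rw]mem_max default). The
 * exact figure varies with struct sizes and cache line alignment,
 * which is precisely why it is computed from SKB_TRUESIZE() here.
 */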
316 
317 /* Run time adjustable parameters. */
318 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
319 EXPORT_SYMBOL(sysctl_wmem_max);
320 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
321 EXPORT_SYMBOL(sysctl_rmem_max);
322 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
323 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
324 
325 /* Maximal space eaten by iovec or ancillary data plus some space */
326 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
327 EXPORT_SYMBOL(sysctl_optmem_max);
328 
329 int sysctl_tstamp_allow_data __read_mostly = 1;
330 
331 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
332 EXPORT_SYMBOL_GPL(memalloc_socks);
333 
334 /**
335  * sk_set_memalloc - sets %SOCK_MEMALLOC
336  * @sk: socket to set it on
337  *
338  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
339  * It's the responsibility of the admin to adjust min_free_kbytes
340  * to meet the requirements.
341  */
342 void sk_set_memalloc(struct sock *sk)
343 {
344 	sock_set_flag(sk, SOCK_MEMALLOC);
345 	sk->sk_allocation |= __GFP_MEMALLOC;
346 	static_key_slow_inc(&memalloc_socks);
347 }
348 EXPORT_SYMBOL_GPL(sk_set_memalloc);
349 
350 void sk_clear_memalloc(struct sock *sk)
351 {
352 	sock_reset_flag(sk, SOCK_MEMALLOC);
353 	sk->sk_allocation &= ~__GFP_MEMALLOC;
354 	static_key_slow_dec(&memalloc_socks);
355 
356 	/*
357 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
358 	 * progress of swapping. SOCK_MEMALLOC may be cleared while
359 	 * it has rmem allocations due to the last swapfile being deactivated
360 	 * but there is a risk that the socket is unusable due to exceeding
361 	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
362 	 */
363 	sk_mem_reclaim(sk);
364 }
365 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
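
/*
 * Example (editorial sketch): a swap-over-network transport, such as an
 * NBD or NFS-swap client, would mark its transport socket right after
 * connecting and clear the flag when the swapfile goes away:
 *
 *	sk_set_memalloc(sock->sk);
 *	...
 *	sk_clear_memalloc(sock->sk);
 *
 * As the comment above notes, the admin still has to raise
 * min_free_kbytes so the reserves can actually absorb the traffic
 * needed to make swap progress.
 */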
366 
367 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
368 {
369 	int ret;
370 	unsigned long pflags = current->flags;
371 
372 	/* these should have been dropped before queueing */
373 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
374 
375 	current->flags |= PF_MEMALLOC;
376 	ret = sk->sk_backlog_rcv(sk, skb);
377 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
378 
379 	return ret;
380 }
381 EXPORT_SYMBOL(__sk_backlog_rcv);
382 
383 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
384 {
385 	struct timeval tv;
386 
387 	if (optlen < sizeof(tv))
388 		return -EINVAL;
389 	if (copy_from_user(&tv, optval, sizeof(tv)))
390 		return -EFAULT;
391 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
392 		return -EDOM;
393 
394 	if (tv.tv_sec < 0) {
395 		static int warned __read_mostly;
396 
397 		*timeo_p = 0;
398 		if (warned < 10 && net_ratelimit()) {
399 			warned++;
400 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
401 				__func__, current->comm, task_pid_nr(current));
402 		}
403 		return 0;
404 	}
405 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
406 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
407 		return 0;
408 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
409 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
410 	return 0;
411 }
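
/*
 * Worked example (editorial note): with HZ=100, a user-supplied timeout
 * of { .tv_sec = 1, .tv_usec = 5000 } becomes
 *
 *	1 * 100 + (5000 + 9999) / 10000 = 101 jiffies
 *
 * i.e. the microseconds are rounded up to the next whole tick, so a
 * non-zero timeout can never be rounded down to zero.
 */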
412 
413 static void sock_warn_obsolete_bsdism(const char *name)
414 {
415 	static int warned;
416 	static char warncomm[TASK_COMM_LEN];
417 	if (strcmp(warncomm, current->comm) && warned < 5) {
418 		strcpy(warncomm, current->comm);
419 		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
420 			warncomm, name);
421 		warned++;
422 	}
423 }
424 
425 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
426 
427 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
428 {
429 	if (sk->sk_flags & flags) {
430 		sk->sk_flags &= ~flags;
431 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
432 			net_disable_timestamp();
433 	}
434 }
435 
436 
437 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
438 {
439 	int err;
440 	unsigned long flags;
441 	struct sk_buff_head *list = &sk->sk_receive_queue;
442 
443 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
444 		atomic_inc(&sk->sk_drops);
445 		trace_sock_rcvqueue_full(sk, skb);
446 		return -ENOMEM;
447 	}
448 
449 	err = sk_filter(sk, skb);
450 	if (err)
451 		return err;
452 
453 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
454 		atomic_inc(&sk->sk_drops);
455 		return -ENOBUFS;
456 	}
457 
458 	skb->dev = NULL;
459 	skb_set_owner_r(skb, sk);
460 
461 	/* We escape from the RCU-protected region here; make sure we don't
462 	 * leak a non-refcounted dst.
463 	 */
464 	skb_dst_force(skb);
465 
466 	spin_lock_irqsave(&list->lock, flags);
467 	sock_skb_set_dropcount(sk, skb);
468 	__skb_queue_tail(list, skb);
469 	spin_unlock_irqrestore(&list->lock, flags);
470 
471 	if (!sock_flag(sk, SOCK_DEAD))
472 		sk->sk_data_ready(sk);
473 	return 0;
474 }
475 EXPORT_SYMBOL(sock_queue_rcv_skb);
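
/*
 * Example (editorial sketch): a datagram protocol's delivery path would
 * typically call the above from its rcv handler, along these lines:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 *
 * sock_queue_rcv_skb() already bumps sk_drops on rcvbuf overflow; the
 * caller only has to free the skb it still owns on failure.
 */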
476 
477 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
478 {
479 	int rc = NET_RX_SUCCESS;
480 
481 	if (sk_filter(sk, skb))
482 		goto discard_and_relse;
483 
484 	skb->dev = NULL;
485 
486 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
487 		atomic_inc(&sk->sk_drops);
488 		goto discard_and_relse;
489 	}
490 	if (nested)
491 		bh_lock_sock_nested(sk);
492 	else
493 		bh_lock_sock(sk);
494 	if (!sock_owned_by_user(sk)) {
495 		/*
496 		 * trylock + unlock semantics:
497 		 */
498 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
499 
500 		rc = sk_backlog_rcv(sk, skb);
501 
502 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
503 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
504 		bh_unlock_sock(sk);
505 		atomic_inc(&sk->sk_drops);
506 		goto discard_and_relse;
507 	}
508 
509 	bh_unlock_sock(sk);
510 out:
511 	sock_put(sk);
512 	return rc;
513 discard_and_relse:
514 	kfree_skb(skb);
515 	goto out;
516 }
517 EXPORT_SYMBOL(sk_receive_skb);
518 
519 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
520 {
521 	struct dst_entry *dst = __sk_dst_get(sk);
522 
523 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
524 		sk_tx_queue_clear(sk);
525 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
526 		dst_release(dst);
527 		return NULL;
528 	}
529 
530 	return dst;
531 }
532 EXPORT_SYMBOL(__sk_dst_check);
533 
534 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
535 {
536 	struct dst_entry *dst = sk_dst_get(sk);
537 
538 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
539 		sk_dst_reset(sk);
540 		dst_release(dst);
541 		return NULL;
542 	}
543 
544 	return dst;
545 }
546 EXPORT_SYMBOL(sk_dst_check);
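
/*
 * Example (editorial sketch): an output path caches its route and
 * revalidates it with a cookie before each use; the IPv6 code does
 * roughly the following, with np = inet6_sk(sk):
 *
 *	dst = sk_dst_check(sk, np->dst_cookie);
 *	if (!dst) {
 *		... route lookup, then ip6_dst_store(sk, dst, ...) ...
 *	}
 *
 * When the cached dst has been obsoleted (e.g. by a routing change),
 * sk_dst_check() drops it and returns NULL, forcing a fresh lookup.
 */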
547 
548 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
549 				int optlen)
550 {
551 	int ret = -ENOPROTOOPT;
552 #ifdef CONFIG_NETDEVICES
553 	struct net *net = sock_net(sk);
554 	char devname[IFNAMSIZ];
555 	int index;
556 
557 	/* Sorry... */
558 	ret = -EPERM;
559 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
560 		goto out;
561 
562 	ret = -EINVAL;
563 	if (optlen < 0)
564 		goto out;
565 
566 	/* Bind this socket to a particular device like "eth0",
567 	 * as specified in the passed interface name. If the
568 	 * name is "" or the option length is zero the socket
569 	 * is not bound.
570 	 */
571 	if (optlen > IFNAMSIZ - 1)
572 		optlen = IFNAMSIZ - 1;
573 	memset(devname, 0, sizeof(devname));
574 
575 	ret = -EFAULT;
576 	if (copy_from_user(devname, optval, optlen))
577 		goto out;
578 
579 	index = 0;
580 	if (devname[0] != '\0') {
581 		struct net_device *dev;
582 
583 		rcu_read_lock();
584 		dev = dev_get_by_name_rcu(net, devname);
585 		if (dev)
586 			index = dev->ifindex;
587 		rcu_read_unlock();
588 		ret = -ENODEV;
589 		if (!dev)
590 			goto out;
591 	}
592 
593 	lock_sock(sk);
594 	sk->sk_bound_dev_if = index;
595 	sk_dst_reset(sk);
596 	release_sock(sk);
597 
598 	ret = 0;
599 
600 out:
601 #endif
602 
603 	return ret;
604 }
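
/*
 * Example (editorial sketch): from userspace the option above is driven
 * with a plain setsockopt() call; the caller needs CAP_NET_RAW:
 *
 *	const char ifname[] = "eth0";
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, sizeof(ifname));
 *
 * Passing an empty name (or a zero option length) unbinds the socket
 * from the device again.
 */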
605 
606 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
607 				int __user *optlen, int len)
608 {
609 	int ret = -ENOPROTOOPT;
610 #ifdef CONFIG_NETDEVICES
611 	struct net *net = sock_net(sk);
612 	char devname[IFNAMSIZ];
613 
614 	if (sk->sk_bound_dev_if == 0) {
615 		len = 0;
616 		goto zero;
617 	}
618 
619 	ret = -EINVAL;
620 	if (len < IFNAMSIZ)
621 		goto out;
622 
623 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
624 	if (ret)
625 		goto out;
626 
627 	len = strlen(devname) + 1;
628 
629 	ret = -EFAULT;
630 	if (copy_to_user(optval, devname, len))
631 		goto out;
632 
633 zero:
634 	ret = -EFAULT;
635 	if (put_user(len, optlen))
636 		goto out;
637 
638 	ret = 0;
639 
640 out:
641 #endif
642 
643 	return ret;
644 }
645 
646 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
647 {
648 	if (valbool)
649 		sock_set_flag(sk, bit);
650 	else
651 		sock_reset_flag(sk, bit);
652 }
653 
654 bool sk_mc_loop(struct sock *sk)
655 {
656 	if (dev_recursion_level())
657 		return false;
658 	if (!sk)
659 		return true;
660 	switch (sk->sk_family) {
661 	case AF_INET:
662 		return inet_sk(sk)->mc_loop;
663 #if IS_ENABLED(CONFIG_IPV6)
664 	case AF_INET6:
665 		return inet6_sk(sk)->mc_loop;
666 #endif
667 	}
668 	WARN_ON(1);
669 	return true;
670 }
671 EXPORT_SYMBOL(sk_mc_loop);
672 
673 /*
674  *	This is meant for all protocols to use and covers goings on
675  *	at the socket level. Everything here is generic.
676  */
677 
678 int sock_setsockopt(struct socket *sock, int level, int optname,
679 		    char __user *optval, unsigned int optlen)
680 {
681 	struct sock *sk = sock->sk;
682 	int val;
683 	int valbool;
684 	struct linger ling;
685 	int ret = 0;
686 
687 	/*
688 	 *	Options without arguments
689 	 */
690 
691 	if (optname == SO_BINDTODEVICE)
692 		return sock_setbindtodevice(sk, optval, optlen);
693 
694 	if (optlen < sizeof(int))
695 		return -EINVAL;
696 
697 	if (get_user(val, (int __user *)optval))
698 		return -EFAULT;
699 
700 	valbool = val ? 1 : 0;
701 
702 	lock_sock(sk);
703 
704 	switch (optname) {
705 	case SO_DEBUG:
706 		if (val && !capable(CAP_NET_ADMIN))
707 			ret = -EACCES;
708 		else
709 			sock_valbool_flag(sk, SOCK_DBG, valbool);
710 		break;
711 	case SO_REUSEADDR:
712 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
713 		break;
714 	case SO_REUSEPORT:
715 		sk->sk_reuseport = valbool;
716 		break;
717 	case SO_TYPE:
718 	case SO_PROTOCOL:
719 	case SO_DOMAIN:
720 	case SO_ERROR:
721 		ret = -ENOPROTOOPT;
722 		break;
723 	case SO_DONTROUTE:
724 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
725 		break;
726 	case SO_BROADCAST:
727 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
728 		break;
729 	case SO_SNDBUF:
730 		/* Don't error on this; BSD doesn't, and if you think
731 		 * about it this is right. Otherwise apps have to
732 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
733 		 * are treated in BSD as hints.
734 		 */
735 		val = min_t(u32, val, sysctl_wmem_max);
736 set_sndbuf:
737 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
738 		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
739 		/* Wake up sending tasks if we upped the value. */
740 		sk->sk_write_space(sk);
741 		break;
742 
743 	case SO_SNDBUFFORCE:
744 		if (!capable(CAP_NET_ADMIN)) {
745 			ret = -EPERM;
746 			break;
747 		}
748 		goto set_sndbuf;
749 
750 	case SO_RCVBUF:
751 		/* Don't error on this; BSD doesn't, and if you think
752 		 * about it this is right. Otherwise apps have to
753 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
754 		 * are treated in BSD as hints.
755 		 */
756 		val = min_t(u32, val, sysctl_rmem_max);
757 set_rcvbuf:
758 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
759 		/*
760 		 * We double it on the way in to account for
761 		 * "struct sk_buff" etc. overhead.   Applications
762 		 * assume that the SO_RCVBUF setting they make will
763 		 * allow that much actual data to be received on that
764 		 * socket.
765 		 *
766 		 * Applications are unaware that "struct sk_buff" and
767 		 * other overheads allocate from the receive buffer
768 		 * during socket buffer allocation.
769 		 *
770 		 * And after considering the possible alternatives,
771 		 * returning the value we actually used in getsockopt
772 		 * is the most desirable behavior.
773 		 */
774 		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
775 		break;
776 
777 	case SO_RCVBUFFORCE:
778 		if (!capable(CAP_NET_ADMIN)) {
779 			ret = -EPERM;
780 			break;
781 		}
782 		goto set_rcvbuf;
783 
784 	case SO_KEEPALIVE:
785 #ifdef CONFIG_INET
786 		if (sk->sk_protocol == IPPROTO_TCP &&
787 		    sk->sk_type == SOCK_STREAM)
788 			tcp_set_keepalive(sk, valbool);
789 #endif
790 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
791 		break;
792 
793 	case SO_OOBINLINE:
794 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
795 		break;
796 
797 	case SO_NO_CHECK:
798 		sk->sk_no_check_tx = valbool;
799 		break;
800 
801 	case SO_PRIORITY:
802 		if ((val >= 0 && val <= 6) ||
803 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
804 			sk->sk_priority = val;
805 		else
806 			ret = -EPERM;
807 		break;
808 
809 	case SO_LINGER:
810 		if (optlen < sizeof(ling)) {
811 			ret = -EINVAL;	/* 1003.1g */
812 			break;
813 		}
814 		if (copy_from_user(&ling, optval, sizeof(ling))) {
815 			ret = -EFAULT;
816 			break;
817 		}
818 		if (!ling.l_onoff)
819 			sock_reset_flag(sk, SOCK_LINGER);
820 		else {
821 #if (BITS_PER_LONG == 32)
822 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
823 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
824 			else
825 #endif
826 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
827 			sock_set_flag(sk, SOCK_LINGER);
828 		}
829 		break;
830 
831 	case SO_BSDCOMPAT:
832 		sock_warn_obsolete_bsdism("setsockopt");
833 		break;
834 
835 	case SO_PASSCRED:
836 		if (valbool)
837 			set_bit(SOCK_PASSCRED, &sock->flags);
838 		else
839 			clear_bit(SOCK_PASSCRED, &sock->flags);
840 		break;
841 
842 	case SO_TIMESTAMP:
843 	case SO_TIMESTAMPNS:
844 		if (valbool)  {
845 			if (optname == SO_TIMESTAMP)
846 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
847 			else
848 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
849 			sock_set_flag(sk, SOCK_RCVTSTAMP);
850 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
851 		} else {
852 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
853 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
854 		}
855 		break;
856 
857 	case SO_TIMESTAMPING:
858 		if (val & ~SOF_TIMESTAMPING_MASK) {
859 			ret = -EINVAL;
860 			break;
861 		}
862 
863 		if (val & SOF_TIMESTAMPING_OPT_ID &&
864 		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
865 			if (sk->sk_protocol == IPPROTO_TCP) {
866 				if (sk->sk_state != TCP_ESTABLISHED) {
867 					ret = -EINVAL;
868 					break;
869 				}
870 				sk->sk_tskey = tcp_sk(sk)->snd_una;
871 			} else {
872 				sk->sk_tskey = 0;
873 			}
874 		}
875 		sk->sk_tsflags = val;
876 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
877 			sock_enable_timestamp(sk,
878 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
879 		else
880 			sock_disable_timestamp(sk,
881 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
882 		break;
883 
884 	case SO_RCVLOWAT:
885 		if (val < 0)
886 			val = INT_MAX;
887 		sk->sk_rcvlowat = val ? : 1;
888 		break;
889 
890 	case SO_RCVTIMEO:
891 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
892 		break;
893 
894 	case SO_SNDTIMEO:
895 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
896 		break;
897 
898 	case SO_ATTACH_FILTER:
899 		ret = -EINVAL;
900 		if (optlen == sizeof(struct sock_fprog)) {
901 			struct sock_fprog fprog;
902 
903 			ret = -EFAULT;
904 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
905 				break;
906 
907 			ret = sk_attach_filter(&fprog, sk);
908 		}
909 		break;
910 
911 	case SO_ATTACH_BPF:
912 		ret = -EINVAL;
913 		if (optlen == sizeof(u32)) {
914 			u32 ufd;
915 
916 			ret = -EFAULT;
917 			if (copy_from_user(&ufd, optval, sizeof(ufd)))
918 				break;
919 
920 			ret = sk_attach_bpf(ufd, sk);
921 		}
922 		break;
923 
924 	case SO_DETACH_FILTER:
925 		ret = sk_detach_filter(sk);
926 		break;
927 
928 	case SO_LOCK_FILTER:
929 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
930 			ret = -EPERM;
931 		else
932 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
933 		break;
934 
935 	case SO_PASSSEC:
936 		if (valbool)
937 			set_bit(SOCK_PASSSEC, &sock->flags);
938 		else
939 			clear_bit(SOCK_PASSSEC, &sock->flags);
940 		break;
941 	case SO_MARK:
942 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
943 			ret = -EPERM;
944 		else
945 			sk->sk_mark = val;
946 		break;
947 
948 	case SO_RXQ_OVFL:
949 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
950 		break;
951 
952 	case SO_WIFI_STATUS:
953 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
954 		break;
955 
956 	case SO_PEEK_OFF:
957 		if (sock->ops->set_peek_off)
958 			ret = sock->ops->set_peek_off(sk, val);
959 		else
960 			ret = -EOPNOTSUPP;
961 		break;
962 
963 	case SO_NOFCS:
964 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
965 		break;
966 
967 	case SO_SELECT_ERR_QUEUE:
968 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
969 		break;
970 
971 #ifdef CONFIG_NET_RX_BUSY_POLL
972 	case SO_BUSY_POLL:
973 		/* allow unprivileged users to decrease the value */
974 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
975 			ret = -EPERM;
976 		else {
977 			if (val < 0)
978 				ret = -EINVAL;
979 			else
980 				sk->sk_ll_usec = val;
981 		}
982 		break;
983 #endif
984 
985 	case SO_MAX_PACING_RATE:
986 		sk->sk_max_pacing_rate = val;
987 		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
988 					 sk->sk_max_pacing_rate);
989 		break;
990 
991 	default:
992 		ret = -ENOPROTOOPT;
993 		break;
994 	}
995 	release_sock(sk);
996 	return ret;
997 }
998 EXPORT_SYMBOL(sock_setsockopt);
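
/*
 * Example (editorial note): the doubling applied to SO_SNDBUF/SO_RCVBUF
 * above is visible from userspace, which often surprises people:
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	... val now reads back as 131072; the request is clamped to
 *	    sysctl_rmem_max before being doubled ...
 *
 * The factor of two is the sk_buff/bookkeeping overhead discussed in
 * the SO_RCVBUF case above.
 */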
999 
1000 
1001 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1002 			  struct ucred *ucred)
1003 {
1004 	ucred->pid = pid_vnr(pid);
1005 	ucred->uid = ucred->gid = -1;
1006 	if (cred) {
1007 		struct user_namespace *current_ns = current_user_ns();
1008 
1009 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1010 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1011 	}
1012 }
1013 
1014 int sock_getsockopt(struct socket *sock, int level, int optname,
1015 		    char __user *optval, int __user *optlen)
1016 {
1017 	struct sock *sk = sock->sk;
1018 
1019 	union {
1020 		int val;
1021 		struct linger ling;
1022 		struct timeval tm;
1023 	} v;
1024 
1025 	int lv = sizeof(int);
1026 	int len;
1027 
1028 	if (get_user(len, optlen))
1029 		return -EFAULT;
1030 	if (len < 0)
1031 		return -EINVAL;
1032 
1033 	memset(&v, 0, sizeof(v));
1034 
1035 	switch (optname) {
1036 	case SO_DEBUG:
1037 		v.val = sock_flag(sk, SOCK_DBG);
1038 		break;
1039 
1040 	case SO_DONTROUTE:
1041 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1042 		break;
1043 
1044 	case SO_BROADCAST:
1045 		v.val = sock_flag(sk, SOCK_BROADCAST);
1046 		break;
1047 
1048 	case SO_SNDBUF:
1049 		v.val = sk->sk_sndbuf;
1050 		break;
1051 
1052 	case SO_RCVBUF:
1053 		v.val = sk->sk_rcvbuf;
1054 		break;
1055 
1056 	case SO_REUSEADDR:
1057 		v.val = sk->sk_reuse;
1058 		break;
1059 
1060 	case SO_REUSEPORT:
1061 		v.val = sk->sk_reuseport;
1062 		break;
1063 
1064 	case SO_KEEPALIVE:
1065 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1066 		break;
1067 
1068 	case SO_TYPE:
1069 		v.val = sk->sk_type;
1070 		break;
1071 
1072 	case SO_PROTOCOL:
1073 		v.val = sk->sk_protocol;
1074 		break;
1075 
1076 	case SO_DOMAIN:
1077 		v.val = sk->sk_family;
1078 		break;
1079 
1080 	case SO_ERROR:
1081 		v.val = -sock_error(sk);
1082 		if (v.val == 0)
1083 			v.val = xchg(&sk->sk_err_soft, 0);
1084 		break;
1085 
1086 	case SO_OOBINLINE:
1087 		v.val = sock_flag(sk, SOCK_URGINLINE);
1088 		break;
1089 
1090 	case SO_NO_CHECK:
1091 		v.val = sk->sk_no_check_tx;
1092 		break;
1093 
1094 	case SO_PRIORITY:
1095 		v.val = sk->sk_priority;
1096 		break;
1097 
1098 	case SO_LINGER:
1099 		lv		= sizeof(v.ling);
1100 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1101 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1102 		break;
1103 
1104 	case SO_BSDCOMPAT:
1105 		sock_warn_obsolete_bsdism("getsockopt");
1106 		break;
1107 
1108 	case SO_TIMESTAMP:
1109 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1110 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1111 		break;
1112 
1113 	case SO_TIMESTAMPNS:
1114 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1115 		break;
1116 
1117 	case SO_TIMESTAMPING:
1118 		v.val = sk->sk_tsflags;
1119 		break;
1120 
1121 	case SO_RCVTIMEO:
1122 		lv = sizeof(struct timeval);
1123 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1124 			v.tm.tv_sec = 0;
1125 			v.tm.tv_usec = 0;
1126 		} else {
1127 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1128 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1129 		}
1130 		break;
1131 
1132 	case SO_SNDTIMEO:
1133 		lv = sizeof(struct timeval);
1134 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1135 			v.tm.tv_sec = 0;
1136 			v.tm.tv_usec = 0;
1137 		} else {
1138 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1139 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1140 		}
1141 		break;
1142 
1143 	case SO_RCVLOWAT:
1144 		v.val = sk->sk_rcvlowat;
1145 		break;
1146 
1147 	case SO_SNDLOWAT:
1148 		v.val = 1;
1149 		break;
1150 
1151 	case SO_PASSCRED:
1152 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1153 		break;
1154 
1155 	case SO_PEERCRED:
1156 	{
1157 		struct ucred peercred;
1158 		if (len > sizeof(peercred))
1159 			len = sizeof(peercred);
1160 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1161 		if (copy_to_user(optval, &peercred, len))
1162 			return -EFAULT;
1163 		goto lenout;
1164 	}
1165 
1166 	case SO_PEERNAME:
1167 	{
1168 		char address[128];
1169 
1170 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1171 			return -ENOTCONN;
1172 		if (lv < len)
1173 			return -EINVAL;
1174 		if (copy_to_user(optval, address, len))
1175 			return -EFAULT;
1176 		goto lenout;
1177 	}
1178 
1179 	/* Dubious BSD thing... Probably nobody even uses it, but
1180 	 * the UNIX standard wants it for whatever reason... -DaveM
1181 	 */
1182 	case SO_ACCEPTCONN:
1183 		v.val = sk->sk_state == TCP_LISTEN;
1184 		break;
1185 
1186 	case SO_PASSSEC:
1187 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1188 		break;
1189 
1190 	case SO_PEERSEC:
1191 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1192 
1193 	case SO_MARK:
1194 		v.val = sk->sk_mark;
1195 		break;
1196 
1197 	case SO_RXQ_OVFL:
1198 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1199 		break;
1200 
1201 	case SO_WIFI_STATUS:
1202 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1203 		break;
1204 
1205 	case SO_PEEK_OFF:
1206 		if (!sock->ops->set_peek_off)
1207 			return -EOPNOTSUPP;
1208 
1209 		v.val = sk->sk_peek_off;
1210 		break;
1211 	case SO_NOFCS:
1212 		v.val = sock_flag(sk, SOCK_NOFCS);
1213 		break;
1214 
1215 	case SO_BINDTODEVICE:
1216 		return sock_getbindtodevice(sk, optval, optlen, len);
1217 
1218 	case SO_GET_FILTER:
1219 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1220 		if (len < 0)
1221 			return len;
1222 
1223 		goto lenout;
1224 
1225 	case SO_LOCK_FILTER:
1226 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1227 		break;
1228 
1229 	case SO_BPF_EXTENSIONS:
1230 		v.val = bpf_tell_extensions();
1231 		break;
1232 
1233 	case SO_SELECT_ERR_QUEUE:
1234 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1235 		break;
1236 
1237 #ifdef CONFIG_NET_RX_BUSY_POLL
1238 	case SO_BUSY_POLL:
1239 		v.val = sk->sk_ll_usec;
1240 		break;
1241 #endif
1242 
1243 	case SO_MAX_PACING_RATE:
1244 		v.val = sk->sk_max_pacing_rate;
1245 		break;
1246 
1247 	case SO_INCOMING_CPU:
1248 		v.val = sk->sk_incoming_cpu;
1249 		break;
1250 
1251 	default:
1252 		/* We implement SO_SNDLOWAT etc. to not be settable
1253 		 * (1003.1g 7).
1254 		 */
1255 		return -ENOPROTOOPT;
1256 	}
1257 
1258 	if (len > lv)
1259 		len = lv;
1260 	if (copy_to_user(optval, &v, len))
1261 		return -EFAULT;
1262 lenout:
1263 	if (put_user(len, optlen))
1264 		return -EFAULT;
1265 	return 0;
1266 }
1267 
1268 /*
1269  * Initialize an sk_lock.
1270  *
1271  * (We also register the sk_lock with the lock validator.)
1272  */
1273 static inline void sock_lock_init(struct sock *sk)
1274 {
1275 	sock_lock_init_class_and_name(sk,
1276 			af_family_slock_key_strings[sk->sk_family],
1277 			af_family_slock_keys + sk->sk_family,
1278 			af_family_key_strings[sk->sk_family],
1279 			af_family_keys + sk->sk_family);
1280 }
1281 
1282 /*
1283  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1284  * even temporarily, because of RCU lookups. sk_node should also be left as-is.
1285  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1286  */
1287 static void sock_copy(struct sock *nsk, const struct sock *osk)
1288 {
1289 #ifdef CONFIG_SECURITY_NETWORK
1290 	void *sptr = nsk->sk_security;
1291 #endif
1292 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1293 
1294 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1295 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1296 
1297 #ifdef CONFIG_SECURITY_NETWORK
1298 	nsk->sk_security = sptr;
1299 	security_sk_clone(osk, nsk);
1300 #endif
1301 }
1302 
1303 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1304 {
1305 	unsigned long nulls1, nulls2;
1306 
1307 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1308 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1309 	if (nulls1 > nulls2)
1310 		swap(nulls1, nulls2);
1311 
1312 	if (nulls1 != 0)
1313 		memset((char *)sk, 0, nulls1);
1314 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1315 	       nulls2 - nulls1 - sizeof(void *));
1316 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1317 	       size - nulls2 - sizeof(void *));
1318 }
1319 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1320 
1321 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1322 		int family)
1323 {
1324 	struct sock *sk;
1325 	struct kmem_cache *slab;
1326 
1327 	slab = prot->slab;
1328 	if (slab != NULL) {
1329 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1330 		if (!sk)
1331 			return sk;
1332 		if (priority & __GFP_ZERO) {
1333 			if (prot->clear_sk)
1334 				prot->clear_sk(sk, prot->obj_size);
1335 			else
1336 				sk_prot_clear_nulls(sk, prot->obj_size);
1337 		}
1338 	} else
1339 		sk = kmalloc(prot->obj_size, priority);
1340 
1341 	if (sk != NULL) {
1342 		kmemcheck_annotate_bitfield(sk, flags);
1343 
1344 		if (security_sk_alloc(sk, family, priority))
1345 			goto out_free;
1346 
1347 		if (!try_module_get(prot->owner))
1348 			goto out_free_sec;
1349 		sk_tx_queue_clear(sk);
1350 	}
1351 
1352 	return sk;
1353 
1354 out_free_sec:
1355 	security_sk_free(sk);
1356 out_free:
1357 	if (slab != NULL)
1358 		kmem_cache_free(slab, sk);
1359 	else
1360 		kfree(sk);
1361 	return NULL;
1362 }
1363 
1364 static void sk_prot_free(struct proto *prot, struct sock *sk)
1365 {
1366 	struct kmem_cache *slab;
1367 	struct module *owner;
1368 
1369 	owner = prot->owner;
1370 	slab = prot->slab;
1371 
1372 	security_sk_free(sk);
1373 	if (slab != NULL)
1374 		kmem_cache_free(slab, sk);
1375 	else
1376 		kfree(sk);
1377 	module_put(owner);
1378 }
1379 
1380 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1381 void sock_update_netprioidx(struct sock *sk)
1382 {
1383 	if (in_interrupt())
1384 		return;
1385 
1386 	sk->sk_cgrp_prioidx = task_netprioidx(current);
1387 }
1388 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1389 #endif
1390 
1391 /**
1392  *	sk_alloc - All socket objects are allocated here
1393  *	@net: the applicable net namespace
1394  *	@family: protocol family
1395  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1396  *	@prot: struct proto associated with this new sock instance
1397  *	@kern: is this to be a kernel socket?
1398  */
1399 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1400 		      struct proto *prot, int kern)
1401 {
1402 	struct sock *sk;
1403 
1404 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1405 	if (sk) {
1406 		sk->sk_family = family;
1407 		/*
1408 		 * See comment in struct sock definition to understand
1409 		 * why we need sk_prot_creator -acme
1410 		 */
1411 		sk->sk_prot = sk->sk_prot_creator = prot;
1412 		sock_lock_init(sk);
1413 		sk->sk_net_refcnt = kern ? 0 : 1;
1414 		if (likely(sk->sk_net_refcnt))
1415 			get_net(net);
1416 		sock_net_set(sk, net);
1417 		atomic_set(&sk->sk_wmem_alloc, 1);
1418 
1419 		sock_update_classid(sk);
1420 		sock_update_netprioidx(sk);
1421 	}
1422 
1423 	return sk;
1424 }
1425 EXPORT_SYMBOL(sk_alloc);
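
/*
 * Example (editorial sketch): a protocol's ->create() hook typically
 * pairs sk_alloc() with sock_init_data(), along the lines of what
 * inet_create() does ("my_proto" is a placeholder):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * The __GFP_ZERO OR-ed in above means protocols get a zeroed sock
 * (minus the RCU-sensitive fields) without asking for it.
 */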
1426 
1427 void sk_destruct(struct sock *sk)
1428 {
1429 	struct sk_filter *filter;
1430 
1431 	if (sk->sk_destruct)
1432 		sk->sk_destruct(sk);
1433 
1434 	filter = rcu_dereference_check(sk->sk_filter,
1435 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1436 	if (filter) {
1437 		sk_filter_uncharge(sk, filter);
1438 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1439 	}
1440 
1441 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1442 
1443 	if (atomic_read(&sk->sk_omem_alloc))
1444 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1445 			 __func__, atomic_read(&sk->sk_omem_alloc));
1446 
1447 	if (sk->sk_peer_cred)
1448 		put_cred(sk->sk_peer_cred);
1449 	put_pid(sk->sk_peer_pid);
1450 	if (likely(sk->sk_net_refcnt))
1451 		put_net(sock_net(sk));
1452 	sk_prot_free(sk->sk_prot_creator, sk);
1453 }
1454 
1455 static void __sk_free(struct sock *sk)
1456 {
1457 	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
1458 		sock_diag_broadcast_destroy(sk);
1459 	else
1460 		sk_destruct(sk);
1461 }
1462 
1463 void sk_free(struct sock *sk)
1464 {
1465 	/*
1466 	 * We subtract one from sk_wmem_alloc and can know if
1467 	 * some packets are still in some tx queue.
1468 	 * If not zero, sock_wfree() will call __sk_free(sk) later.
1469 	 */
1470 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1471 		__sk_free(sk);
1472 }
1473 EXPORT_SYMBOL(sk_free);
1474 
1475 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1476 {
1477 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1478 		sock_update_memcg(newsk);
1479 }
1480 
1481 /**
1482  *	sk_clone_lock - clone a socket, and lock its clone
1483  *	@sk: the socket to clone
1484  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1485  *
1486  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1487  */
1488 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1489 {
1490 	struct sock *newsk;
1491 	bool is_charged = true;
1492 
1493 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1494 	if (newsk != NULL) {
1495 		struct sk_filter *filter;
1496 
1497 		sock_copy(newsk, sk);
1498 
1499 		/* SANITY */
1500 		get_net(sock_net(newsk));
1501 		sk_node_init(&newsk->sk_node);
1502 		sock_lock_init(newsk);
1503 		bh_lock_sock(newsk);
1504 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1505 		newsk->sk_backlog.len = 0;
1506 
1507 		atomic_set(&newsk->sk_rmem_alloc, 0);
1508 		/*
1509 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1510 		 */
1511 		atomic_set(&newsk->sk_wmem_alloc, 1);
1512 		atomic_set(&newsk->sk_omem_alloc, 0);
1513 		skb_queue_head_init(&newsk->sk_receive_queue);
1514 		skb_queue_head_init(&newsk->sk_write_queue);
1515 
1516 		spin_lock_init(&newsk->sk_dst_lock);
1517 		rwlock_init(&newsk->sk_callback_lock);
1518 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1519 				af_callback_keys + newsk->sk_family,
1520 				af_family_clock_key_strings[newsk->sk_family]);
1521 
1522 		newsk->sk_dst_cache	= NULL;
1523 		newsk->sk_wmem_queued	= 0;
1524 		newsk->sk_forward_alloc = 0;
1525 		newsk->sk_send_head	= NULL;
1526 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1527 
1528 		sock_reset_flag(newsk, SOCK_DONE);
1529 		skb_queue_head_init(&newsk->sk_error_queue);
1530 
1531 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1532 		if (filter != NULL)
1533 			/* Though it's an empty new sock, the charging may fail
1534 			 * if sysctl_optmem_max was changed between creation of
1535 			 * the original socket and cloning
1536 			 */
1537 			is_charged = sk_filter_charge(newsk, filter);
1538 
1539 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
1540 			/* It is still a raw copy of the parent, so invalidate
1541 			 * its destructor and do a plain sk_free() */
1542 			newsk->sk_destruct = NULL;
1543 			bh_unlock_sock(newsk);
1544 			sk_free(newsk);
1545 			newsk = NULL;
1546 			goto out;
1547 		}
1548 
1549 		newsk->sk_err	   = 0;
1550 		newsk->sk_priority = 0;
1551 		newsk->sk_incoming_cpu = raw_smp_processor_id();
1552 		atomic64_set(&newsk->sk_cookie, 0);
1553 		/*
1554 		 * Before updating sk_refcnt, we must commit prior changes to memory
1555 		 * (Documentation/RCU/rculist_nulls.txt for details)
1556 		 */
1557 		smp_wmb();
1558 		atomic_set(&newsk->sk_refcnt, 2);
1559 
1560 		/*
1561 		 * Increment the counter in the same struct proto as the master
1562 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1563 		 * is the same as sk->sk_prot->socks, as this field was copied
1564 		 * with memcpy).
1565 		 *
1566 		 * This _changes_ the previous behaviour, where
1567 		 * tcp_create_openreq_child always incremented the
1568 		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1569 		 * to be taken into account in all callers. -acme
1570 		 */
1571 		sk_refcnt_debug_inc(newsk);
1572 		sk_set_socket(newsk, NULL);
1573 		newsk->sk_wq = NULL;
1574 
1575 		sk_update_clone(sk, newsk);
1576 
1577 		if (newsk->sk_prot->sockets_allocated)
1578 			sk_sockets_allocated_inc(newsk);
1579 
1580 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1581 			net_enable_timestamp();
1582 	}
1583 out:
1584 	return newsk;
1585 }
1586 EXPORT_SYMBOL_GPL(sk_clone_lock);
1587 
1588 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1589 {
1590 	u32 max_segs = 1;
1591 
1592 	__sk_dst_set(sk, dst);
1593 	sk->sk_route_caps = dst->dev->features;
1594 	if (sk->sk_route_caps & NETIF_F_GSO)
1595 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1596 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1597 	if (sk_can_gso(sk)) {
1598 		if (dst->header_len) {
1599 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1600 		} else {
1601 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1602 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1603 			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1604 		}
1605 	}
1606 	sk->sk_gso_max_segs = max_segs;
1607 }
1608 EXPORT_SYMBOL_GPL(sk_setup_caps);
1609 
1610 /*
1611  *	Simple resource managers for sockets.
1612  */
1613 
1614 
1615 /*
1616  * Write buffer destructor automatically called from kfree_skb.
1617  */
1618 void sock_wfree(struct sk_buff *skb)
1619 {
1620 	struct sock *sk = skb->sk;
1621 	unsigned int len = skb->truesize;
1622 
1623 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1624 		/*
1625 		 * Keep a reference on sk_wmem_alloc; it will be released
1626 		 * after the sk_write_space() call
1627 		 */
1628 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1629 		sk->sk_write_space(sk);
1630 		len = 1;
1631 	}
1632 	/*
1633 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1634 	 * could not do because of in-flight packets
1635 	 */
1636 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1637 		__sk_free(sk);
1638 }
1639 EXPORT_SYMBOL(sock_wfree);
1640 
1641 void skb_orphan_partial(struct sk_buff *skb)
1642 {
1643 	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1644 	 * so we do not completely orphan the skb, but transfer all
1645 	 * accounted bytes but one, to avoid unexpected reorders.
1646 	 */
1647 	if (skb->destructor == sock_wfree
1648 #ifdef CONFIG_INET
1649 	    || skb->destructor == tcp_wfree
1650 #endif
1651 		) {
1652 		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1653 		skb->truesize = 1;
1654 	} else {
1655 		skb_orphan(skb);
1656 	}
1657 }
1658 EXPORT_SYMBOL(skb_orphan_partial);
1659 
1660 /*
1661  * Read buffer destructor automatically called from kfree_skb.
1662  */
1663 void sock_rfree(struct sk_buff *skb)
1664 {
1665 	struct sock *sk = skb->sk;
1666 	unsigned int len = skb->truesize;
1667 
1668 	atomic_sub(len, &sk->sk_rmem_alloc);
1669 	sk_mem_uncharge(sk, len);
1670 }
1671 EXPORT_SYMBOL(sock_rfree);
1672 
1673 /*
1674  * Buffer destructor for skbs that are not used directly in read or write
1675  * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1676  */
1677 void sock_efree(struct sk_buff *skb)
1678 {
1679 	sock_put(skb->sk);
1680 }
1681 EXPORT_SYMBOL(sock_efree);
1682 
1683 kuid_t sock_i_uid(struct sock *sk)
1684 {
1685 	kuid_t uid;
1686 
1687 	read_lock_bh(&sk->sk_callback_lock);
1688 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1689 	read_unlock_bh(&sk->sk_callback_lock);
1690 	return uid;
1691 }
1692 EXPORT_SYMBOL(sock_i_uid);
1693 
1694 unsigned long sock_i_ino(struct sock *sk)
1695 {
1696 	unsigned long ino;
1697 
1698 	read_lock_bh(&sk->sk_callback_lock);
1699 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1700 	read_unlock_bh(&sk->sk_callback_lock);
1701 	return ino;
1702 }
1703 EXPORT_SYMBOL(sock_i_ino);
1704 
1705 /*
1706  * Allocate a skb from the socket's send buffer.
1707  */
1708 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1709 			     gfp_t priority)
1710 {
1711 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1712 		struct sk_buff *skb = alloc_skb(size, priority);
1713 		if (skb) {
1714 			skb_set_owner_w(skb, sk);
1715 			return skb;
1716 		}
1717 	}
1718 	return NULL;
1719 }
1720 EXPORT_SYMBOL(sock_wmalloc);
1721 
1722 /*
1723  * Allocate a memory block from the socket's option memory buffer.
1724  */
1725 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1726 {
1727 	if ((unsigned int)size <= sysctl_optmem_max &&
1728 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1729 		void *mem;
1730 		/* First do the add, to avoid the race if kmalloc
1731 		 * might sleep.
1732 		 */
1733 		atomic_add(size, &sk->sk_omem_alloc);
1734 		mem = kmalloc(size, priority);
1735 		if (mem)
1736 			return mem;
1737 		atomic_sub(size, &sk->sk_omem_alloc);
1738 	}
1739 	return NULL;
1740 }
1741 EXPORT_SYMBOL(sock_kmalloc);
1742 
1743 /* Free an option memory block. Note, we actually want the inline
1744  * here as this allows gcc to detect the nullify and fold away the
1745  * condition entirely.
1746  */
1747 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1748 				  const bool nullify)
1749 {
1750 	if (WARN_ON_ONCE(!mem))
1751 		return;
1752 	if (nullify)
1753 		kzfree(mem);
1754 	else
1755 		kfree(mem);
1756 	atomic_sub(size, &sk->sk_omem_alloc);
1757 }
1758 
1759 void sock_kfree_s(struct sock *sk, void *mem, int size)
1760 {
1761 	__sock_kfree_s(sk, mem, size, false);
1762 }
1763 EXPORT_SYMBOL(sock_kfree_s);
1764 
1765 void sock_kzfree_s(struct sock *sk, void *mem, int size)
1766 {
1767 	__sock_kfree_s(sk, mem, size, true);
1768 }
1769 EXPORT_SYMBOL(sock_kzfree_s);
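
/*
 * Example (editorial sketch): option memory is allocated and released
 * in matched pairs, with the size passed back explicitly so the
 * sk_omem_alloc accounting stays balanced:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 *
 * sock_kzfree_s() is the variant to use when the block held key
 * material or other data that should be wiped before freeing.
 */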
1770 
1771 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1772    I think these locks should be removed for datagram sockets.
1773  */
1774 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1775 {
1776 	DEFINE_WAIT(wait);
1777 
1778 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1779 	for (;;) {
1780 		if (!timeo)
1781 			break;
1782 		if (signal_pending(current))
1783 			break;
1784 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1785 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1786 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1787 			break;
1788 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1789 			break;
1790 		if (sk->sk_err)
1791 			break;
1792 		timeo = schedule_timeout(timeo);
1793 	}
1794 	finish_wait(sk_sleep(sk), &wait);
1795 	return timeo;
1796 }
1797 
1798 
1799 /*
1800  *	Generic send/receive buffer handlers
1801  */
1802 
1803 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1804 				     unsigned long data_len, int noblock,
1805 				     int *errcode, int max_page_order)
1806 {
1807 	struct sk_buff *skb;
1808 	long timeo;
1809 	int err;
1810 
1811 	timeo = sock_sndtimeo(sk, noblock);
1812 	for (;;) {
1813 		err = sock_error(sk);
1814 		if (err != 0)
1815 			goto failure;
1816 
1817 		err = -EPIPE;
1818 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1819 			goto failure;
1820 
1821 		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1822 			break;
1823 
1824 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1825 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1826 		err = -EAGAIN;
1827 		if (!timeo)
1828 			goto failure;
1829 		if (signal_pending(current))
1830 			goto interrupted;
1831 		timeo = sock_wait_for_wmem(sk, timeo);
1832 	}
1833 	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1834 				   errcode, sk->sk_allocation);
1835 	if (skb)
1836 		skb_set_owner_w(skb, sk);
1837 	return skb;
1838 
1839 interrupted:
1840 	err = sock_intr_errno(timeo);
1841 failure:
1842 	*errcode = err;
1843 	return NULL;
1844 }
1845 EXPORT_SYMBOL(sock_alloc_send_pskb);
1846 
1847 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1848 				    int noblock, int *errcode)
1849 {
1850 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1851 }
1852 EXPORT_SYMBOL(sock_alloc_send_skb);
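
/*
 * Example (editorial sketch): a datagram sendmsg() implementation would
 * typically let the helper above do the buffer-space blocking for it
 * ("len" and "hlen" are placeholders for payload and header sizes):
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out_err;
 *	skb_reserve(skb, hlen);
 *
 * With MSG_DONTWAIT set this fails fast with -EAGAIN; otherwise it
 * sleeps in sock_wait_for_wmem() until sndbuf space frees up, the
 * timeout expires or a signal arrives.
 */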
1853 
1854 /* On 32bit arches, an skb frag is limited to 2^15 */
1855 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1856 
1857 /**
1858  * skb_page_frag_refill - check that a page_frag contains enough room
1859  * @sz: minimum size of the fragment we want to get
1860  * @pfrag: pointer to page_frag
1861  * @gfp: priority for memory allocation
1862  *
1863  * Note: While this allocator tries to use high order pages, there is
1864  * no guarantee that allocations succeed. Therefore, @sz MUST be
1865  * less than or equal to PAGE_SIZE.
1866  */
1867 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1868 {
1869 	if (pfrag->page) {
1870 		if (atomic_read(&pfrag->page->_count) == 1) {
1871 			pfrag->offset = 0;
1872 			return true;
1873 		}
1874 		if (pfrag->offset + sz <= pfrag->size)
1875 			return true;
1876 		put_page(pfrag->page);
1877 	}
1878 
1879 	pfrag->offset = 0;
1880 	if (SKB_FRAG_PAGE_ORDER) {
1881 		pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
1882 					  __GFP_NOWARN | __GFP_NORETRY,
1883 					  SKB_FRAG_PAGE_ORDER);
1884 		if (likely(pfrag->page)) {
1885 			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
1886 			return true;
1887 		}
1888 	}
1889 	pfrag->page = alloc_page(gfp);
1890 	if (likely(pfrag->page)) {
1891 		pfrag->size = PAGE_SIZE;
1892 		return true;
1893 	}
1894 	return false;
1895 }
1896 EXPORT_SYMBOL(skb_page_frag_refill);
1897 
1898 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1899 {
1900 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1901 		return true;
1902 
1903 	sk_enter_memory_pressure(sk);
1904 	sk_stream_moderate_sndbuf(sk);
1905 	return false;
1906 }
1907 EXPORT_SYMBOL(sk_page_frag_refill);
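
/*
 * Example (editorial sketch): the per-socket page frag is consumed in a
 * fill/copy/advance loop, roughly as the TCP sendmsg path does ("copy"
 * and the wait_for_memory label are placeholders):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	... copy user data into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 *
 * On failure the socket has already been put under memory pressure and
 * its sndbuf moderated, so backing off and waiting is the right move.
 */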
1908 
1909 static void __lock_sock(struct sock *sk)
1910 	__releases(&sk->sk_lock.slock)
1911 	__acquires(&sk->sk_lock.slock)
1912 {
1913 	DEFINE_WAIT(wait);
1914 
1915 	for (;;) {
1916 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1917 					TASK_UNINTERRUPTIBLE);
1918 		spin_unlock_bh(&sk->sk_lock.slock);
1919 		schedule();
1920 		spin_lock_bh(&sk->sk_lock.slock);
1921 		if (!sock_owned_by_user(sk))
1922 			break;
1923 	}
1924 	finish_wait(&sk->sk_lock.wq, &wait);
1925 }
1926 
1927 static void __release_sock(struct sock *sk)
1928 	__releases(&sk->sk_lock.slock)
1929 	__acquires(&sk->sk_lock.slock)
1930 {
1931 	struct sk_buff *skb = sk->sk_backlog.head;
1932 
1933 	do {
1934 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1935 		bh_unlock_sock(sk);
1936 
1937 		do {
1938 			struct sk_buff *next = skb->next;
1939 
1940 			prefetch(next);
1941 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1942 			skb->next = NULL;
1943 			sk_backlog_rcv(sk, skb);
1944 
1945 			/*
1946 			 * We are in process context here with softirqs
1947 			 * disabled, use cond_resched_softirq() to preempt.
1948 			 * This is safe to do because we've taken the backlog
1949 			 * queue private:
1950 			 */
1951 			cond_resched_softirq();
1952 
1953 			skb = next;
1954 		} while (skb != NULL);
1955 
1956 		bh_lock_sock(sk);
1957 	} while ((skb = sk->sk_backlog.head) != NULL);
1958 
1959 	/*
1960 	 * Doing the zeroing here guarantees we cannot loop forever
1961 	 * while a wild producer attempts to flood us.
1962 	 */
1963 	sk->sk_backlog.len = 0;
1964 }
1965 
1966 /**
1967  * sk_wait_data - wait for data to arrive at sk_receive_queue
1968  * @sk:    sock to wait on
1969  * @timeo: for how long
1970  *
1971  * Now socket state including sk->sk_err is changed only under lock,
1972  * hence we may omit checks after joining wait queue.
1973  * We check receive queue before schedule() only as optimization;
1974  * it is very likely that release_sock() added new data.
1975  */
1976 int sk_wait_data(struct sock *sk, long *timeo)
1977 {
1978 	int rc;
1979 	DEFINE_WAIT(wait);
1980 
1981 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1984 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 	finish_wait(sk_sleep(sk), &wait);
1986 	return rc;
1987 }
1988 EXPORT_SYMBOL(sk_wait_data);
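/*
 * Illustrative sketch, not part of the original file: the usual receive
 * loop around sk_wait_data(). The "example_" name is hypothetical.
 */
#if 0
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock,
					    int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	lock_sock(sk);
	while (!(skb = skb_dequeue(&sk->sk_receive_queue))) {
		*err = -EAGAIN;
		if (!timeo)
			break;
		*err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		/* Drops the socket lock while sleeping, retakes it after. */
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return skb;
}
#endif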
1989 
1990 /**
1991  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1992  *	@sk: socket
1993  *	@size: memory size to allocate
1994  *	@kind: allocation type
1995  *
1996  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1997  *	rmem allocation. This function assumes that protocols which have
1998  *	memory_pressure use sk_wmem_queued as write buffer accounting.
1999  */
2000 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2001 {
2002 	struct proto *prot = sk->sk_prot;
2003 	int amt = sk_mem_pages(size);
2004 	long allocated;
2005 	int parent_status = UNDER_LIMIT;
2006 
2007 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2008 
2009 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2010 
2011 	/* Under limit. */
2012 	if (parent_status == UNDER_LIMIT &&
2013 			allocated <= sk_prot_mem_limits(sk, 0)) {
2014 		sk_leave_memory_pressure(sk);
2015 		return 1;
2016 	}
2017 
2018 	/* Under pressure. (we or our parents) */
2019 	if ((parent_status > SOFT_LIMIT) ||
2020 			allocated > sk_prot_mem_limits(sk, 1))
2021 		sk_enter_memory_pressure(sk);
2022 
2023 	/* Over hard limit (we or our parents) */
2024 	if ((parent_status == OVER_LIMIT) ||
2025 			(allocated > sk_prot_mem_limits(sk, 2)))
2026 		goto suppress_allocation;
2027 
2028 	/* guarantee minimum buffer size under pressure */
2029 	if (kind == SK_MEM_RECV) {
2030 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2031 			return 1;
2032 
2033 	} else { /* SK_MEM_SEND */
2034 		if (sk->sk_type == SOCK_STREAM) {
2035 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2036 				return 1;
2037 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2038 			   prot->sysctl_wmem[0])
2039 				return 1;
2040 	}
2041 
2042 	if (sk_has_memory_pressure(sk)) {
2043 		int alloc;
2044 
2045 		if (!sk_under_memory_pressure(sk))
2046 			return 1;
2047 		alloc = sk_sockets_allocated_read_positive(sk);
2048 		if (sk_prot_mem_limits(sk, 2) > alloc *
2049 		    sk_mem_pages(sk->sk_wmem_queued +
2050 				 atomic_read(&sk->sk_rmem_alloc) +
2051 				 sk->sk_forward_alloc))
2052 			return 1;
2053 	}
2054 
2055 suppress_allocation:
2056 
2057 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2058 		sk_stream_moderate_sndbuf(sk);
2059 
2060 		/* Fail only if socket is _under_ its sndbuf.
2061 		 * In this case we cannot block, so we have to fail.
2062 		 */
2063 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2064 			return 1;
2065 	}
2066 
2067 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2068 
2069 	/* Alas. Undo changes. */
2070 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2071 
2072 	sk_memory_allocated_sub(sk, amt);
2073 
2074 	return 0;
2075 }
2076 EXPORT_SYMBOL(__sk_mem_schedule);
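/*
 * Illustrative sketch, not part of the original file: protocols normally
 * reach __sk_mem_schedule() through the sk_rmem_schedule()/
 * sk_wmem_schedule() wrappers when charging an skb before queueing it.
 * The "example_" name is hypothetical.
 */
#if 0
static int example_queue_rx(struct sock *sk, struct sk_buff *skb)
{
	/* Charge receive memory; fails once over the hard limit. */
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}
#endif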
2077 
2078 /**
2079  *	__sk_mem_reclaim - reclaim memory_allocated
2080  *	@sk: socket
2081  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2082  */
2083 void __sk_mem_reclaim(struct sock *sk, int amount)
2084 {
2085 	amount >>= SK_MEM_QUANTUM_SHIFT;
2086 	sk_memory_allocated_sub(sk, amount);
2087 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2088 
2089 	if (sk_under_memory_pressure(sk) &&
2090 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2091 		sk_leave_memory_pressure(sk);
2092 }
2093 EXPORT_SYMBOL(__sk_mem_reclaim);
2094 
2095 
2096 /*
2097  * Set of default routines for initialising struct proto_ops when
2098  * the protocol does not support a particular function. In certain
2099  * cases where it makes no sense for a protocol to have a "do nothing"
2100  * function, some default processing is provided.
2101  */
2102 
2103 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2104 {
2105 	return -EOPNOTSUPP;
2106 }
2107 EXPORT_SYMBOL(sock_no_bind);
2108 
2109 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2110 		    int len, int flags)
2111 {
2112 	return -EOPNOTSUPP;
2113 }
2114 EXPORT_SYMBOL(sock_no_connect);
2115 
2116 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2117 {
2118 	return -EOPNOTSUPP;
2119 }
2120 EXPORT_SYMBOL(sock_no_socketpair);
2121 
2122 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2123 {
2124 	return -EOPNOTSUPP;
2125 }
2126 EXPORT_SYMBOL(sock_no_accept);
2127 
2128 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2129 		    int *len, int peer)
2130 {
2131 	return -EOPNOTSUPP;
2132 }
2133 EXPORT_SYMBOL(sock_no_getname);
2134 
2135 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2136 {
2137 	return 0;
2138 }
2139 EXPORT_SYMBOL(sock_no_poll);
2140 
2141 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2142 {
2143 	return -EOPNOTSUPP;
2144 }
2145 EXPORT_SYMBOL(sock_no_ioctl);
2146 
2147 int sock_no_listen(struct socket *sock, int backlog)
2148 {
2149 	return -EOPNOTSUPP;
2150 }
2151 EXPORT_SYMBOL(sock_no_listen);
2152 
2153 int sock_no_shutdown(struct socket *sock, int how)
2154 {
2155 	return -EOPNOTSUPP;
2156 }
2157 EXPORT_SYMBOL(sock_no_shutdown);
2158 
2159 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2160 		    char __user *optval, unsigned int optlen)
2161 {
2162 	return -EOPNOTSUPP;
2163 }
2164 EXPORT_SYMBOL(sock_no_setsockopt);
2165 
2166 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2167 		    char __user *optval, int __user *optlen)
2168 {
2169 	return -EOPNOTSUPP;
2170 }
2171 EXPORT_SYMBOL(sock_no_getsockopt);
2172 
2173 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2174 {
2175 	return -EOPNOTSUPP;
2176 }
2177 EXPORT_SYMBOL(sock_no_sendmsg);
2178 
2179 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2180 		    int flags)
2181 {
2182 	return -EOPNOTSUPP;
2183 }
2184 EXPORT_SYMBOL(sock_no_recvmsg);
2185 
2186 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2187 {
2188 	/* Mirror missing mmap method error code */
2189 	return -ENODEV;
2190 }
2191 EXPORT_SYMBOL(sock_no_mmap);
2192 
2193 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2194 {
2195 	ssize_t res;
2196 	struct msghdr msg = {.msg_flags = flags};
2197 	struct kvec iov;
2198 	char *kaddr = kmap(page);
2199 	iov.iov_base = kaddr + offset;
2200 	iov.iov_len = size;
2201 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2202 	kunmap(page);
2203 	return res;
2204 }
2205 EXPORT_SYMBOL(sock_no_sendpage);
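/*
 * Illustrative sketch, not part of the original file: a connectionless
 * protocol wiring the sock_no_*() stubs into its proto_ops for the
 * operations it does not support. PF_EXAMPLE and the "example_" names
 * are hypothetical.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_EXAMPLE,
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= example_bind,
	.connect	= sock_no_connect,	/* -EOPNOTSUPP */
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= example_getname,
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif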
2206 
2207 /*
2208  *	Default Socket Callbacks
2209  */
2210 
2211 static void sock_def_wakeup(struct sock *sk)
2212 {
2213 	struct socket_wq *wq;
2214 
2215 	rcu_read_lock();
2216 	wq = rcu_dereference(sk->sk_wq);
2217 	if (wq_has_sleeper(wq))
2218 		wake_up_interruptible_all(&wq->wait);
2219 	rcu_read_unlock();
2220 }
2221 
2222 static void sock_def_error_report(struct sock *sk)
2223 {
2224 	struct socket_wq *wq;
2225 
2226 	rcu_read_lock();
2227 	wq = rcu_dereference(sk->sk_wq);
2228 	if (wq_has_sleeper(wq))
2229 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2230 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2231 	rcu_read_unlock();
2232 }
2233 
2234 static void sock_def_readable(struct sock *sk)
2235 {
2236 	struct socket_wq *wq;
2237 
2238 	rcu_read_lock();
2239 	wq = rcu_dereference(sk->sk_wq);
2240 	if (wq_has_sleeper(wq))
2241 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2242 						POLLRDNORM | POLLRDBAND);
2243 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2244 	rcu_read_unlock();
2245 }
2246 
2247 static void sock_def_write_space(struct sock *sk)
2248 {
2249 	struct socket_wq *wq;
2250 
2251 	rcu_read_lock();
2252 
2253 	/* Do not wake up a writer until he can make "significant"
2254 	 * progress.  --DaveM
2255 	 */
2256 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2257 		wq = rcu_dereference(sk->sk_wq);
2258 		if (wq_has_sleeper(wq))
2259 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2260 						POLLWRNORM | POLLWRBAND);
2261 
2262 		/* Should agree with poll, otherwise some programs break */
2263 		if (sock_writeable(sk))
2264 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2265 	}
2266 
2267 	rcu_read_unlock();
2268 }
2269 
2270 static void sock_def_destruct(struct sock *sk)
2271 {
2272 }
2273 
2274 void sk_send_sigurg(struct sock *sk)
2275 {
2276 	if (sk->sk_socket && sk->sk_socket->file)
2277 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2278 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2279 }
2280 EXPORT_SYMBOL(sk_send_sigurg);
2281 
2282 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2283 		    unsigned long expires)
2284 {
2285 	if (!mod_timer(timer, expires))
2286 		sock_hold(sk);
2287 }
2288 EXPORT_SYMBOL(sk_reset_timer);
2289 
2290 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2291 {
2292 	if (del_timer(timer))
2293 		__sock_put(sk);
2294 }
2295 EXPORT_SYMBOL(sk_stop_timer);
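/*
 * Illustrative sketch, not part of the original file: the refcounting
 * contract around sk_reset_timer()/sk_stop_timer(). The handler owns
 * the reference taken when the timer was armed and must drop it. The
 * "example_" names are hypothetical.
 */
#if 0
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... protocol timeout processing ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}

static void example_arm_timer(struct sock *sk)
{
	/* Takes a reference on @sk unless the timer was already pending. */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}
#endif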
2296 
2297 void sock_init_data(struct socket *sock, struct sock *sk)
2298 {
2299 	skb_queue_head_init(&sk->sk_receive_queue);
2300 	skb_queue_head_init(&sk->sk_write_queue);
2301 	skb_queue_head_init(&sk->sk_error_queue);
2302 
2303 	sk->sk_send_head	=	NULL;
2304 
2305 	init_timer(&sk->sk_timer);
2306 
2307 	sk->sk_allocation	=	GFP_KERNEL;
2308 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2309 	sk->sk_sndbuf		=	sysctl_wmem_default;
2310 	sk->sk_state		=	TCP_CLOSE;
2311 	sk_set_socket(sk, sock);
2312 
2313 	sock_set_flag(sk, SOCK_ZAPPED);
2314 
2315 	if (sock) {
2316 		sk->sk_type	=	sock->type;
2317 		sk->sk_wq	=	sock->wq;
2318 		sock->sk	=	sk;
2319 	} else
2320 		sk->sk_wq	=	NULL;
2321 
2322 	spin_lock_init(&sk->sk_dst_lock);
2323 	rwlock_init(&sk->sk_callback_lock);
2324 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2325 			af_callback_keys + sk->sk_family,
2326 			af_family_clock_key_strings[sk->sk_family]);
2327 
2328 	sk->sk_state_change	=	sock_def_wakeup;
2329 	sk->sk_data_ready	=	sock_def_readable;
2330 	sk->sk_write_space	=	sock_def_write_space;
2331 	sk->sk_error_report	=	sock_def_error_report;
2332 	sk->sk_destruct		=	sock_def_destruct;
2333 
2334 	sk->sk_frag.page	=	NULL;
2335 	sk->sk_frag.offset	=	0;
2336 	sk->sk_peek_off		=	-1;
2337 
2338 	sk->sk_peer_pid 	=	NULL;
2339 	sk->sk_peer_cred	=	NULL;
2340 	sk->sk_write_pending	=	0;
2341 	sk->sk_rcvlowat		=	1;
2342 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2343 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2344 
2345 	sk->sk_stamp = ktime_set(-1L, 0);
2346 
2347 #ifdef CONFIG_NET_RX_BUSY_POLL
2348 	sk->sk_napi_id		=	0;
2349 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2350 #endif
2351 
2352 	sk->sk_max_pacing_rate = ~0U;
2353 	sk->sk_pacing_rate = ~0U;
2354 	/*
2355 	 * Before updating sk_refcnt, we must commit prior changes to memory
2356 	 * (Documentation/RCU/rculist_nulls.txt for details)
2357 	 */
2358 	smp_wmb();
2359 	atomic_set(&sk->sk_refcnt, 1);
2360 	atomic_set(&sk->sk_drops, 0);
2361 }
2362 EXPORT_SYMBOL(sock_init_data);
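/*
 * Illustrative sketch, not part of the original file: a protocol create
 * path calling sock_init_data() and then overriding some of the default
 * callbacks it installs. PF_EXAMPLE, example_proto and the other
 * "example_" names are hypothetical; sk_alloc() is used with this
 * kernel's four-argument form.
 */
#if 0
static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);	/* queues, timer, default callbacks */

	/* Override selected defaults installed by sock_init_data(). */
	sk->sk_data_ready = example_data_ready;
	sk->sk_destruct   = example_destruct;
	return 0;
}
#endif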
2363 
2364 void lock_sock_nested(struct sock *sk, int subclass)
2365 {
2366 	might_sleep();
2367 	spin_lock_bh(&sk->sk_lock.slock);
2368 	if (sk->sk_lock.owned)
2369 		__lock_sock(sk);
2370 	sk->sk_lock.owned = 1;
2371 	spin_unlock(&sk->sk_lock.slock);
2372 	/*
2373 	 * The sk_lock has mutex_lock() semantics here:
2374 	 */
2375 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2376 	local_bh_enable();
2377 }
2378 EXPORT_SYMBOL(lock_sock_nested);
2379 
2380 void release_sock(struct sock *sk)
2381 {
2382 	/*
2383 	 * The sk_lock has mutex_unlock() semantics:
2384 	 */
2385 	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2386 
2387 	spin_lock_bh(&sk->sk_lock.slock);
2388 	if (sk->sk_backlog.tail)
2389 		__release_sock(sk);
2390 
2391 	/* Warning: release_cb() might need to release sk ownership,
2392 	 * i.e. call sock_release_ownership(sk) before us.
2393 	 */
2394 	if (sk->sk_prot->release_cb)
2395 		sk->sk_prot->release_cb(sk);
2396 
2397 	sock_release_ownership(sk);
2398 	if (waitqueue_active(&sk->sk_lock.wq))
2399 		wake_up(&sk->sk_lock.wq);
2400 	spin_unlock_bh(&sk->sk_lock.slock);
2401 }
2402 EXPORT_SYMBOL(release_sock);
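/*
 * Illustrative sketch, not part of the original file: the process-context
 * pattern served by __release_sock(). While the lock is owned, softirq
 * input is diverted to sk->sk_backlog; release_sock() replays it. The
 * "example_" names are hypothetical.
 */
#if 0
static int example_set_option(struct sock *sk, int val)
{
	lock_sock(sk);		/* may sleep in __lock_sock() */
	/* Socket state can be modified freely here; concurrent receive
	 * work queues up on the backlog instead of racing with us. */
	example_apply_option(sk, val);
	release_sock(sk);	/* processes the backlog, wakes lock waiters */
	return 0;
}
#endif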
2403 
2404 /**
2405  * lock_sock_fast - fast version of lock_sock
2406  * @sk: socket
2407  *
2408  * This version should be used for very small sections, where the process
2409  * won't block. Returns false if the fast path is taken:
2410  *   sk_lock.slock locked, owned = 0, BH disabled
2411  * Returns true if the slow path is taken:
2412  *   sk_lock.slock unlocked, owned = 1, BH enabled
2413  */
2414 bool lock_sock_fast(struct sock *sk)
2415 {
2416 	might_sleep();
2417 	spin_lock_bh(&sk->sk_lock.slock);
2418 
2419 	if (!sk->sk_lock.owned)
2420 		/*
2421 		 * Note: the fast path returns with BH still disabled
2422 		 */
2423 		return false;
2424 
2425 	__lock_sock(sk);
2426 	sk->sk_lock.owned = 1;
2427 	spin_unlock(&sk->sk_lock.slock);
2428 	/*
2429 	 * The sk_lock has mutex_lock() semantics here:
2430 	 */
2431 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2432 	local_bh_enable();
2433 	return true;
2434 }
2435 EXPORT_SYMBOL(lock_sock_fast);
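/*
 * Illustrative sketch, not part of the original file: pairing
 * lock_sock_fast() with unlock_sock_fast() for a short, non-blocking
 * critical section. The "example_" name is hypothetical.
 */
#if 0
static void example_purge_receive_queue(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* Keep this short: on the fast path we run with BH disabled. */
	__skb_queue_purge(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
}
#endif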
2436 
2437 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2438 {
2439 	struct timeval tv;
2440 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2441 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2442 	tv = ktime_to_timeval(sk->sk_stamp);
2443 	if (tv.tv_sec == -1)
2444 		return -ENOENT;
2445 	if (tv.tv_sec == 0) {
2446 		sk->sk_stamp = ktime_get_real();
2447 		tv = ktime_to_timeval(sk->sk_stamp);
2448 	}
2449 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2450 }
2451 EXPORT_SYMBOL(sock_get_timestamp);
2452 
2453 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2454 {
2455 	struct timespec ts;
2456 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2457 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2458 	ts = ktime_to_timespec(sk->sk_stamp);
2459 	if (ts.tv_sec == -1)
2460 		return -ENOENT;
2461 	if (ts.tv_sec == 0) {
2462 		sk->sk_stamp = ktime_get_real();
2463 		ts = ktime_to_timespec(sk->sk_stamp);
2464 	}
2465 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2466 }
2467 EXPORT_SYMBOL(sock_get_timestampns);
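/*
 * Illustrative sketch, not part of the original file: how an ioctl
 * handler services SIOCGSTAMP/SIOCGSTAMPNS with these helpers, in the
 * spirit of inet_ioctl(). The "example_" name is hypothetical.
 */
#if 0
static int example_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk,
					    (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif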
2468 
2469 void sock_enable_timestamp(struct sock *sk, int flag)
2470 {
2471 	if (!sock_flag(sk, flag)) {
2472 		unsigned long previous_flags = sk->sk_flags;
2473 
2474 		sock_set_flag(sk, flag);
2475 		/*
2476 		 * we just set one of the two flags which require net
2477 		 * time stamping, but time stamping might have been on
2478 		 * already because of the other one
2479 		 */
2480 		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2481 			net_enable_timestamp();
2482 	}
2483 }
2484 
2485 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2486 		       int level, int type)
2487 {
2488 	struct sock_exterr_skb *serr;
2489 	struct sk_buff *skb;
2490 	int copied, err;
2491 
2492 	err = -EAGAIN;
2493 	skb = sock_dequeue_err_skb(sk);
2494 	if (skb == NULL)
2495 		goto out;
2496 
2497 	copied = skb->len;
2498 	if (copied > len) {
2499 		msg->msg_flags |= MSG_TRUNC;
2500 		copied = len;
2501 	}
2502 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
2503 	if (err)
2504 		goto out_free_skb;
2505 
2506 	sock_recv_timestamp(msg, sk, skb);
2507 
2508 	serr = SKB_EXT_ERR(skb);
2509 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2510 
2511 	msg->msg_flags |= MSG_ERRQUEUE;
2512 	err = copied;
2513 
2514 out_free_skb:
2515 	kfree_skb(skb);
2516 out:
2517 	return err;
2518 }
2519 EXPORT_SYMBOL(sock_recv_errqueue);
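/*
 * Illustrative sketch, not part of the original file: a recvmsg
 * implementation diverting MSG_ERRQUEUE reads to sock_recv_errqueue(),
 * here with the IP-level cmsg pair as ip(7) uses. The "example_" names
 * are hypothetical.
 */
#if 0
static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int noblock, int flags, int *addr_len)
{
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	return example_do_recvmsg(sk, msg, len, noblock, flags, addr_len);
}
#endif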
2520 
2521 /*
2522  *	Get a socket option on a socket.
2523  *
2524  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2525  *	asynchronous errors should be reported by getsockopt. We assume
2526  *	this means if you specify SO_ERROR (otherwise what's the point of it).
2527  */
2528 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2529 			   char __user *optval, int __user *optlen)
2530 {
2531 	struct sock *sk = sock->sk;
2532 
2533 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2534 }
2535 EXPORT_SYMBOL(sock_common_getsockopt);
2536 
2537 #ifdef CONFIG_COMPAT
2538 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2539 				  char __user *optval, int __user *optlen)
2540 {
2541 	struct sock *sk = sock->sk;
2542 
2543 	if (sk->sk_prot->compat_getsockopt != NULL)
2544 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2545 						      optval, optlen);
2546 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2547 }
2548 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2549 #endif
2550 
2551 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2552 			int flags)
2553 {
2554 	struct sock *sk = sock->sk;
2555 	int addr_len = 0;
2556 	int err;
2557 
2558 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2559 				   flags & ~MSG_DONTWAIT, &addr_len);
2560 	if (err >= 0)
2561 		msg->msg_namelen = addr_len;
2562 	return err;
2563 }
2564 EXPORT_SYMBOL(sock_common_recvmsg);
2565 
2566 /*
2567  *	Set socket options on an inet socket.
2568  */
2569 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2570 			   char __user *optval, unsigned int optlen)
2571 {
2572 	struct sock *sk = sock->sk;
2573 
2574 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2575 }
2576 EXPORT_SYMBOL(sock_common_setsockopt);
2577 
2578 #ifdef CONFIG_COMPAT
2579 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2580 				  char __user *optval, unsigned int optlen)
2581 {
2582 	struct sock *sk = sock->sk;
2583 
2584 	if (sk->sk_prot->compat_setsockopt != NULL)
2585 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2586 						      optval, optlen);
2587 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2588 }
2589 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2590 #endif
2591 
2592 void sk_common_release(struct sock *sk)
2593 {
2594 	if (sk->sk_prot->destroy)
2595 		sk->sk_prot->destroy(sk);
2596 
2597 	/*
2598 	 * Observation: when sk_common_release() is called, processes have
2599 	 * no access to the socket, but the network stack still does.
2600 	 * Step one, detach it from networking:
2601 	 *
2602 	 * A. Remove from hash tables.
2603 	 */
2604 
2605 	sk->sk_prot->unhash(sk);
2606 
2607 	/*
2608 	 * At this point the socket cannot receive new packets, but it is possible
2609 	 * that some packets are in flight because some CPU runs the receiver and
2610 	 * did the hash table lookup before we unhashed the socket. They will reach
2611 	 * the receive queue and be purged by the socket destructor.
2612 	 *
2613 	 * Also we still have packets pending on the receive queue and probably
2614 	 * our own packets waiting in device queues. sock_destroy will drain the
2615 	 * receive queue, but transmitted packets will delay socket destruction
2616 	 * until the last reference is released.
2617 	 */
2618 
2619 	sock_orphan(sk);
2620 
2621 	xfrm_sk_free_policy(sk);
2622 
2623 	sk_refcnt_debug_release(sk);
2624 
2625 	if (sk->sk_frag.page) {
2626 		put_page(sk->sk_frag.page);
2627 		sk->sk_frag.page = NULL;
2628 	}
2629 
2630 	sock_put(sk);
2631 }
2632 EXPORT_SYMBOL(sk_common_release);
2633 
2634 #ifdef CONFIG_PROC_FS
2635 #define PROTO_INUSE_NR	64	/* should be enough for the first time */
2636 struct prot_inuse {
2637 	int val[PROTO_INUSE_NR];
2638 };
2639 
2640 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2641 
2642 #ifdef CONFIG_NET_NS
2643 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2644 {
2645 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2646 }
2647 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2648 
2649 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2650 {
2651 	int cpu, idx = prot->inuse_idx;
2652 	int res = 0;
2653 
2654 	for_each_possible_cpu(cpu)
2655 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2656 
2657 	return res >= 0 ? res : 0;
2658 }
2659 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2660 
2661 static int __net_init sock_inuse_init_net(struct net *net)
2662 {
2663 	net->core.inuse = alloc_percpu(struct prot_inuse);
2664 	return net->core.inuse ? 0 : -ENOMEM;
2665 }
2666 
2667 static void __net_exit sock_inuse_exit_net(struct net *net)
2668 {
2669 	free_percpu(net->core.inuse);
2670 }
2671 
2672 static struct pernet_operations net_inuse_ops = {
2673 	.init = sock_inuse_init_net,
2674 	.exit = sock_inuse_exit_net,
2675 };
2676 
2677 static __init int net_inuse_init(void)
2678 {
2679 	if (register_pernet_subsys(&net_inuse_ops))
2680 		panic("Cannot initialize net inuse counters");
2681 
2682 	return 0;
2683 }
2684 
2685 core_initcall(net_inuse_init);
2686 #else
2687 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2688 
2689 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2690 {
2691 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2692 }
2693 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2694 
2695 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2696 {
2697 	int cpu, idx = prot->inuse_idx;
2698 	int res = 0;
2699 
2700 	for_each_possible_cpu(cpu)
2701 		res += per_cpu(prot_inuse, cpu).val[idx];
2702 
2703 	return res >= 0 ? res : 0;
2704 }
2705 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2706 #endif
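/*
 * Illustrative sketch, not part of the original file: protocols bump the
 * per-cpu inuse counters from their hash/unhash callbacks, as TCP and
 * UDP do. The "example_" names are hypothetical.
 */
#if 0
static void example_hash(struct sock *sk)
{
	/* ... insert @sk into the protocol's lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove @sk from the lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif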
2707 
2708 static void assign_proto_idx(struct proto *prot)
2709 {
2710 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2711 
2712 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2713 		pr_err("PROTO_INUSE_NR exhausted\n");
2714 		return;
2715 	}
2716 
2717 	set_bit(prot->inuse_idx, proto_inuse_idx);
2718 }
2719 
2720 static void release_proto_idx(struct proto *prot)
2721 {
2722 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2723 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2724 }
2725 #else
2726 static inline void assign_proto_idx(struct proto *prot)
2727 {
2728 }
2729 
2730 static inline void release_proto_idx(struct proto *prot)
2731 {
2732 }
2733 #endif
2734 
2735 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2736 {
2737 	if (!rsk_prot)
2738 		return;
2739 	kfree(rsk_prot->slab_name);
2740 	rsk_prot->slab_name = NULL;
2741 	if (rsk_prot->slab) {
2742 		kmem_cache_destroy(rsk_prot->slab);
2743 		rsk_prot->slab = NULL;
2744 	}
2745 }
2746 
2747 static int req_prot_init(const struct proto *prot)
2748 {
2749 	struct request_sock_ops *rsk_prot = prot->rsk_prot;
2750 
2751 	if (!rsk_prot)
2752 		return 0;
2753 
2754 	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2755 					prot->name);
2756 	if (!rsk_prot->slab_name)
2757 		return -ENOMEM;
2758 
2759 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2760 					   rsk_prot->obj_size, 0,
2761 					   0, NULL);
2762 
2763 	if (!rsk_prot->slab) {
2764 		pr_crit("%s: Can't create request sock SLAB cache!\n",
2765 			prot->name);
2766 		return -ENOMEM;
2767 	}
2768 	return 0;
2769 }
2770 
2771 int proto_register(struct proto *prot, int alloc_slab)
2772 {
2773 	if (alloc_slab) {
2774 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2775 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2776 					NULL);
2777 
2778 		if (prot->slab == NULL) {
2779 			pr_crit("%s: Can't create sock SLAB cache!\n",
2780 				prot->name);
2781 			goto out;
2782 		}
2783 
2784 		if (req_prot_init(prot))
2785 			goto out_free_request_sock_slab;
2786 
2787 		if (prot->twsk_prot != NULL) {
2788 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2789 
2790 			if (prot->twsk_prot->twsk_slab_name == NULL)
2791 				goto out_free_request_sock_slab;
2792 
2793 			prot->twsk_prot->twsk_slab =
2794 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2795 						  prot->twsk_prot->twsk_obj_size,
2796 						  0,
2797 						  prot->slab_flags,
2798 						  NULL);
2799 			if (prot->twsk_prot->twsk_slab == NULL)
2800 				goto out_free_timewait_sock_slab_name;
2801 		}
2802 	}
2803 
2804 	mutex_lock(&proto_list_mutex);
2805 	list_add(&prot->node, &proto_list);
2806 	assign_proto_idx(prot);
2807 	mutex_unlock(&proto_list_mutex);
2808 	return 0;
2809 
2810 out_free_timewait_sock_slab_name:
2811 	kfree(prot->twsk_prot->twsk_slab_name);
2812 out_free_request_sock_slab:
2813 	req_prot_cleanup(prot->rsk_prot);
2814 
2815 	kmem_cache_destroy(prot->slab);
2816 	prot->slab = NULL;
2817 out:
2818 	return -ENOBUFS;
2819 }
2820 EXPORT_SYMBOL(proto_register);
2821 
2822 void proto_unregister(struct proto *prot)
2823 {
2824 	mutex_lock(&proto_list_mutex);
2825 	release_proto_idx(prot);
2826 	list_del(&prot->node);
2827 	mutex_unlock(&proto_list_mutex);
2828 
2829 	if (prot->slab != NULL) {
2830 		kmem_cache_destroy(prot->slab);
2831 		prot->slab = NULL;
2832 	}
2833 
2834 	req_prot_cleanup(prot->rsk_prot);
2835 
2836 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2837 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2838 		kfree(prot->twsk_prot->twsk_slab_name);
2839 		prot->twsk_prot->twsk_slab = NULL;
2840 	}
2841 }
2842 EXPORT_SYMBOL(proto_unregister);
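/*
 * Illustrative sketch, not part of the original file: the usual module
 * init/exit pairing around proto_register()/proto_unregister(). The
 * "example_" names and struct example_sock are hypothetical.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	/* alloc_slab == 1: back sockets with a dedicated kmem cache. */
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
module_init(example_init);
module_exit(example_exit);
#endif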
2843 
2844 #ifdef CONFIG_PROC_FS
2845 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2846 	__acquires(proto_list_mutex)
2847 {
2848 	mutex_lock(&proto_list_mutex);
2849 	return seq_list_start_head(&proto_list, *pos);
2850 }
2851 
2852 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2853 {
2854 	return seq_list_next(v, &proto_list, pos);
2855 }
2856 
2857 static void proto_seq_stop(struct seq_file *seq, void *v)
2858 	__releases(proto_list_mutex)
2859 {
2860 	mutex_unlock(&proto_list_mutex);
2861 }
2862 
2863 static char proto_method_implemented(const void *method)
2864 {
2865 	return method == NULL ? 'n' : 'y';
2866 }
2867 static long sock_prot_memory_allocated(struct proto *proto)
2868 {
2869 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2870 }
2871 
2872 static char *sock_prot_memory_pressure(struct proto *proto)
2873 {
2874 	return proto->memory_pressure != NULL ?
2875 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2876 }
2877 
2878 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2879 {
2880 
2881 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2882 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2883 		   proto->name,
2884 		   proto->obj_size,
2885 		   sock_prot_inuse_get(seq_file_net(seq), proto),
2886 		   sock_prot_memory_allocated(proto),
2887 		   sock_prot_memory_pressure(proto),
2888 		   proto->max_header,
2889 		   proto->slab == NULL ? "no" : "yes",
2890 		   module_name(proto->owner),
2891 		   proto_method_implemented(proto->close),
2892 		   proto_method_implemented(proto->connect),
2893 		   proto_method_implemented(proto->disconnect),
2894 		   proto_method_implemented(proto->accept),
2895 		   proto_method_implemented(proto->ioctl),
2896 		   proto_method_implemented(proto->init),
2897 		   proto_method_implemented(proto->destroy),
2898 		   proto_method_implemented(proto->shutdown),
2899 		   proto_method_implemented(proto->setsockopt),
2900 		   proto_method_implemented(proto->getsockopt),
2901 		   proto_method_implemented(proto->sendmsg),
2902 		   proto_method_implemented(proto->recvmsg),
2903 		   proto_method_implemented(proto->sendpage),
2904 		   proto_method_implemented(proto->bind),
2905 		   proto_method_implemented(proto->backlog_rcv),
2906 		   proto_method_implemented(proto->hash),
2907 		   proto_method_implemented(proto->unhash),
2908 		   proto_method_implemented(proto->get_port),
2909 		   proto_method_implemented(proto->enter_memory_pressure));
2910 }
2911 
2912 static int proto_seq_show(struct seq_file *seq, void *v)
2913 {
2914 	if (v == &proto_list)
2915 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2916 			   "protocol",
2917 			   "size",
2918 			   "sockets",
2919 			   "memory",
2920 			   "press",
2921 			   "maxhdr",
2922 			   "slab",
2923 			   "module",
2924 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2925 	else
2926 		proto_seq_printf(seq, list_entry(v, struct proto, node));
2927 	return 0;
2928 }
2929 
2930 static const struct seq_operations proto_seq_ops = {
2931 	.start  = proto_seq_start,
2932 	.next   = proto_seq_next,
2933 	.stop   = proto_seq_stop,
2934 	.show   = proto_seq_show,
2935 };
2936 
2937 static int proto_seq_open(struct inode *inode, struct file *file)
2938 {
2939 	return seq_open_net(inode, file, &proto_seq_ops,
2940 			    sizeof(struct seq_net_private));
2941 }
2942 
2943 static const struct file_operations proto_seq_fops = {
2944 	.owner		= THIS_MODULE,
2945 	.open		= proto_seq_open,
2946 	.read		= seq_read,
2947 	.llseek		= seq_lseek,
2948 	.release	= seq_release_net,
2949 };
2950 
2951 static __net_init int proto_init_net(struct net *net)
2952 {
2953 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2954 		return -ENOMEM;
2955 
2956 	return 0;
2957 }
2958 
2959 static __net_exit void proto_exit_net(struct net *net)
2960 {
2961 	remove_proc_entry("protocols", net->proc_net);
2962 }
2963 
2964 
2965 static __net_initdata struct pernet_operations proto_net_ops = {
2966 	.init = proto_init_net,
2967 	.exit = proto_exit_net,
2968 };
2969 
2970 static int __init proto_init(void)
2971 {
2972 	return register_pernet_subsys(&proto_net_ops);
2973 }
2974 
2975 subsys_initcall(proto_init);
2976 
2977 #endif /* PROC_FS */
2978