xref: /openbmc/linux/net/core/sock.c (revision 84744377)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93 
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
120 
121 #include <asm/uaccess.h>
122 
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 
135 #include <linux/filter.h>
136 
137 #include <trace/events/sock.h>
138 
139 #ifdef CONFIG_INET
140 #include <net/tcp.h>
141 #endif
142 
143 #include <net/busy_poll.h>
144 
145 static DEFINE_MUTEX(proto_list_mutex);
146 static LIST_HEAD(proto_list);
147 
148 #ifdef CONFIG_MEMCG_KMEM
149 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
150 {
151 	struct proto *proto;
152 	int ret = 0;
153 
154 	mutex_lock(&proto_list_mutex);
155 	list_for_each_entry(proto, &proto_list, node) {
156 		if (proto->init_cgroup) {
157 			ret = proto->init_cgroup(memcg, ss);
158 			if (ret)
159 				goto out;
160 		}
161 	}
162 
163 	mutex_unlock(&proto_list_mutex);
164 	return ret;
165 out:
166 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
167 		if (proto->destroy_cgroup)
168 			proto->destroy_cgroup(memcg);
169 	mutex_unlock(&proto_list_mutex);
170 	return ret;
171 }
172 
173 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
174 {
175 	struct proto *proto;
176 
177 	mutex_lock(&proto_list_mutex);
178 	list_for_each_entry_reverse(proto, &proto_list, node)
179 		if (proto->destroy_cgroup)
180 			proto->destroy_cgroup(memcg);
181 	mutex_unlock(&proto_list_mutex);
182 }
183 #endif
184 
185 /*
186  * Each address family might have different locking rules, so we have
187  * one slock key per address family:
188  */
189 static struct lock_class_key af_family_keys[AF_MAX];
190 static struct lock_class_key af_family_slock_keys[AF_MAX];
191 
192 #if defined(CONFIG_MEMCG_KMEM)
193 struct static_key memcg_socket_limit_enabled;
194 EXPORT_SYMBOL(memcg_socket_limit_enabled);
195 #endif
196 
197 /*
198  * Make lock validator output more readable. (We pre-construct these
199  * strings at build time, so that runtime initialization of socket
200  * locks is fast):
201  */
202 static const char *const af_family_key_strings[AF_MAX+1] = {
203   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
204   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
205   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
206   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
207   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
208   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
209   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
210   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
211   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
212   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
213   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
214   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
215   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
216   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
217 };
218 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
219   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
220   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
221   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
222   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
223   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
224   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
225   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
226   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
227   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
228   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
229   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
230   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
231   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
232   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
233 };
234 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
235   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
236   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
237   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
238   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
239   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
240   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
241   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
242   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
243   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
244   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
245   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
246   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
247   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
248   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
249 };
250 
251 /*
252  * sk_callback_lock locking rules are per-address-family,
253  * so split the lock classes by using a per-AF key:
254  */
255 static struct lock_class_key af_callback_keys[AF_MAX];
256 
257 /* Take into consideration the size of the struct sk_buff overhead in the
258  * determination of these values, since that is non-constant across
259  * platforms.  This makes socket queueing behavior and performance
260  * not depend upon such differences.
261  */
262 #define _SK_MEM_PACKETS		256
263 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
264 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
265 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
266 
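/*
 * Worked reading of the defaults above (nothing here is a new constant,
 * just the macros expanded): SK_WMEM_MAX and SK_RMEM_MAX are both
 *
 *	_SK_MEM_PACKETS * _SK_MEM_OVERHEAD == 256 * SKB_TRUESIZE(256)
 *
 * i.e. a budget for 256 packets of 256 bytes of payload each, with
 * SKB_TRUESIZE() folding in the per-skb metadata overhead.  Since that
 * overhead is not constant across architectures and configurations, the
 * resulting byte value of these defaults varies too; only the
 * "256 small packets" budget is fixed.
 */
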
267 /* Run time adjustable parameters. */
268 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
269 EXPORT_SYMBOL(sysctl_wmem_max);
270 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
271 EXPORT_SYMBOL(sysctl_rmem_max);
272 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
273 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
274 
275 /* Maximal space eaten by iovec or ancillary data plus some space */
276 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
277 EXPORT_SYMBOL(sysctl_optmem_max);
278 
279 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
280 EXPORT_SYMBOL_GPL(memalloc_socks);
281 
282 /**
283  * sk_set_memalloc - sets %SOCK_MEMALLOC
284  * @sk: socket to set it on
285  *
286  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
287  * It's the responsibility of the admin to adjust min_free_kbytes
288  * to meet the requirements.
289  */
290 void sk_set_memalloc(struct sock *sk)
291 {
292 	sock_set_flag(sk, SOCK_MEMALLOC);
293 	sk->sk_allocation |= __GFP_MEMALLOC;
294 	static_key_slow_inc(&memalloc_socks);
295 }
296 EXPORT_SYMBOL_GPL(sk_set_memalloc);
297 
298 void sk_clear_memalloc(struct sock *sk)
299 {
300 	sock_reset_flag(sk, SOCK_MEMALLOC);
301 	sk->sk_allocation &= ~__GFP_MEMALLOC;
302 	static_key_slow_dec(&memalloc_socks);
303 
304 	/*
305 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
306 	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
307 	 * it has rmem allocations there is a risk that the user of the
308 	 * socket cannot make forward progress due to exceeding the rmem
309 	 * limits. By rights, sk_clear_memalloc() should only be called
310 	 * on sockets being torn down but warn and reset the accounting if
311 	 * that assumption breaks.
312 	 */
313 	if (WARN_ON(sk->sk_forward_alloc))
314 		sk_mem_reclaim(sk);
315 }
316 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
317 
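/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * a protocol that services memory reclaim, e.g. a network transport used
 * for swap, would typically pair the two helpers above around the
 * lifetime of its kernel socket:
 *
 *	sk_set_memalloc(sock->sk);	// grant access to the reserves
 *	...				// socket may now dip into reserves
 *					// and ignore rmem limits
 *	sk_clear_memalloc(sock->sk);	// on teardown, before release
 *
 * As the kernel-doc above notes, the admin still has to size
 * min_free_kbytes so that the reserves are actually there.
 */
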
318 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
319 {
320 	int ret;
321 	unsigned long pflags = current->flags;
322 
323 	/* these should have been dropped before queueing */
324 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
325 
326 	current->flags |= PF_MEMALLOC;
327 	ret = sk->sk_backlog_rcv(sk, skb);
328 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
329 
330 	return ret;
331 }
332 EXPORT_SYMBOL(__sk_backlog_rcv);
333 
334 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
335 {
336 	struct timeval tv;
337 
338 	if (optlen < sizeof(tv))
339 		return -EINVAL;
340 	if (copy_from_user(&tv, optval, sizeof(tv)))
341 		return -EFAULT;
342 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
343 		return -EDOM;
344 
345 	if (tv.tv_sec < 0) {
346 		static int warned __read_mostly;
347 
348 		*timeo_p = 0;
349 		if (warned < 10 && net_ratelimit()) {
350 			warned++;
351 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
352 				__func__, current->comm, task_pid_nr(current));
353 		}
354 		return 0;
355 	}
356 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
357 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
358 		return 0;
359 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
360 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
361 	return 0;
362 }
363 
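/*
 * Worked example for the conversion above, assuming HZ == 1000: an
 * SO_RCVTIMEO/SO_SNDTIMEO value of { .tv_sec = 1, .tv_usec = 500000 }
 * becomes 1 * 1000 + (500000 + 999) / 1000 == 1500 jiffies, i.e. the
 * microseconds are rounded up to the next tick.  A timeout of {0, 0},
 * or anything at or beyond the MAX_SCHEDULE_TIMEOUT/HZ bound, is left
 * as MAX_SCHEDULE_TIMEOUT, which the callers treat as an unbounded wait.
 */
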
364 static void sock_warn_obsolete_bsdism(const char *name)
365 {
366 	static int warned;
367 	static char warncomm[TASK_COMM_LEN];
368 	if (strcmp(warncomm, current->comm) && warned < 5) {
369 		strcpy(warncomm,  current->comm);
370 		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
371 			warncomm, name);
372 		warned++;
373 	}
374 }
375 
376 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
377 
378 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
379 {
380 	if (sk->sk_flags & flags) {
381 		sk->sk_flags &= ~flags;
382 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
383 			net_disable_timestamp();
384 	}
385 }
386 
387 
388 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
389 {
390 	int err;
391 	int skb_len;
392 	unsigned long flags;
393 	struct sk_buff_head *list = &sk->sk_receive_queue;
394 
395 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
396 		atomic_inc(&sk->sk_drops);
397 		trace_sock_rcvqueue_full(sk, skb);
398 		return -ENOMEM;
399 	}
400 
401 	err = sk_filter(sk, skb);
402 	if (err)
403 		return err;
404 
405 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
406 		atomic_inc(&sk->sk_drops);
407 		return -ENOBUFS;
408 	}
409 
410 	skb->dev = NULL;
411 	skb_set_owner_r(skb, sk);
412 
413 	/* Cache the SKB length before we tack it onto the receive
414 	 * queue.  Once it is added it no longer belongs to us and
415 	 * may be freed by other threads of control pulling packets
416 	 * from the queue.
417 	 */
418 	skb_len = skb->len;
419 
420 	/* We escape from the RCU protected region, so make sure we
421 	 * don't leak a non-refcounted dst.
422 	 */
423 	skb_dst_force(skb);
424 
425 	spin_lock_irqsave(&list->lock, flags);
426 	skb->dropcount = atomic_read(&sk->sk_drops);
427 	__skb_queue_tail(list, skb);
428 	spin_unlock_irqrestore(&list->lock, flags);
429 
430 	if (!sock_flag(sk, SOCK_DEAD))
431 		sk->sk_data_ready(sk, skb_len);
432 	return 0;
433 }
434 EXPORT_SYMBOL(sock_queue_rcv_skb);
435 
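/*
 * Typical caller pattern (a sketch, not lifted from a specific
 * protocol): a delivery path hands a received skb to the socket and is
 * responsible for dropping it itself only when queueing fails, e.g.:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 *
 * On success the skb is owned by the receive queue (skb_set_owner_r()
 * above) and will be freed by whoever reads it off the socket.
 */
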
436 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
437 {
438 	int rc = NET_RX_SUCCESS;
439 
440 	if (sk_filter(sk, skb))
441 		goto discard_and_relse;
442 
443 	skb->dev = NULL;
444 
445 	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
446 		atomic_inc(&sk->sk_drops);
447 		goto discard_and_relse;
448 	}
449 	if (nested)
450 		bh_lock_sock_nested(sk);
451 	else
452 		bh_lock_sock(sk);
453 	if (!sock_owned_by_user(sk)) {
454 		/*
455 		 * trylock + unlock semantics:
456 		 */
457 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
458 
459 		rc = sk_backlog_rcv(sk, skb);
460 
461 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
462 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
463 		bh_unlock_sock(sk);
464 		atomic_inc(&sk->sk_drops);
465 		goto discard_and_relse;
466 	}
467 
468 	bh_unlock_sock(sk);
469 out:
470 	sock_put(sk);
471 	return rc;
472 discard_and_relse:
473 	kfree_skb(skb);
474 	goto out;
475 }
476 EXPORT_SYMBOL(sk_receive_skb);
477 
478 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
479 {
480 	struct dst_entry *dst = __sk_dst_get(sk);
481 
482 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
483 		sk_tx_queue_clear(sk);
484 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
485 		dst_release(dst);
486 		return NULL;
487 	}
488 
489 	return dst;
490 }
491 EXPORT_SYMBOL(__sk_dst_check);
492 
493 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
494 {
495 	struct dst_entry *dst = sk_dst_get(sk);
496 
497 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
498 		sk_dst_reset(sk);
499 		dst_release(dst);
500 		return NULL;
501 	}
502 
503 	return dst;
504 }
505 EXPORT_SYMBOL(sk_dst_check);
506 
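/*
 * Sketch of how the two dst checks above are normally used (hypothetical
 * caller): an output path revalidates its cached route and falls back to
 * a fresh lookup when the cache has gone stale, e.g.:
 *
 *	dst = __sk_dst_check(sk, 0);
 *	if (!dst) {
 *		dst = <protocol-specific route lookup>;
 *		sk_setup_caps(sk, dst);
 *	}
 *
 * sk_setup_caps() (further down in this file) re-installs the dst and
 * refreshes the GSO/checksum capabilities derived from its device.
 */
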
507 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
508 				int optlen)
509 {
510 	int ret = -ENOPROTOOPT;
511 #ifdef CONFIG_NETDEVICES
512 	struct net *net = sock_net(sk);
513 	char devname[IFNAMSIZ];
514 	int index;
515 
516 	/* Sorry... */
517 	ret = -EPERM;
518 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
519 		goto out;
520 
521 	ret = -EINVAL;
522 	if (optlen < 0)
523 		goto out;
524 
525 	/* Bind this socket to a particular device like "eth0",
526 	 * as specified in the passed interface name. If the
527 	 * name is "" or the option length is zero the socket
528 	 * is not bound.
529 	 */
530 	if (optlen > IFNAMSIZ - 1)
531 		optlen = IFNAMSIZ - 1;
532 	memset(devname, 0, sizeof(devname));
533 
534 	ret = -EFAULT;
535 	if (copy_from_user(devname, optval, optlen))
536 		goto out;
537 
538 	index = 0;
539 	if (devname[0] != '\0') {
540 		struct net_device *dev;
541 
542 		rcu_read_lock();
543 		dev = dev_get_by_name_rcu(net, devname);
544 		if (dev)
545 			index = dev->ifindex;
546 		rcu_read_unlock();
547 		ret = -ENODEV;
548 		if (!dev)
549 			goto out;
550 	}
551 
552 	lock_sock(sk);
553 	sk->sk_bound_dev_if = index;
554 	sk_dst_reset(sk);
555 	release_sock(sk);
556 
557 	ret = 0;
558 
559 out:
560 #endif
561 
562 	return ret;
563 }
564 
565 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
566 				int __user *optlen, int len)
567 {
568 	int ret = -ENOPROTOOPT;
569 #ifdef CONFIG_NETDEVICES
570 	struct net *net = sock_net(sk);
571 	char devname[IFNAMSIZ];
572 
573 	if (sk->sk_bound_dev_if == 0) {
574 		len = 0;
575 		goto zero;
576 	}
577 
578 	ret = -EINVAL;
579 	if (len < IFNAMSIZ)
580 		goto out;
581 
582 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
583 	if (ret)
584 		goto out;
585 
586 	len = strlen(devname) + 1;
587 
588 	ret = -EFAULT;
589 	if (copy_to_user(optval, devname, len))
590 		goto out;
591 
592 zero:
593 	ret = -EFAULT;
594 	if (put_user(len, optlen))
595 		goto out;
596 
597 	ret = 0;
598 
599 out:
600 #endif
601 
602 	return ret;
603 }
604 
605 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
606 {
607 	if (valbool)
608 		sock_set_flag(sk, bit);
609 	else
610 		sock_reset_flag(sk, bit);
611 }
612 
613 /*
614  *	This is meant for all protocols to use and covers goings on
615  *	at the socket level. Everything here is generic.
616  */
617 
618 int sock_setsockopt(struct socket *sock, int level, int optname,
619 		    char __user *optval, unsigned int optlen)
620 {
621 	struct sock *sk = sock->sk;
622 	int val;
623 	int valbool;
624 	struct linger ling;
625 	int ret = 0;
626 
627 	/*
628 	 *	Options without arguments
629 	 */
630 
631 	if (optname == SO_BINDTODEVICE)
632 		return sock_setbindtodevice(sk, optval, optlen);
633 
634 	if (optlen < sizeof(int))
635 		return -EINVAL;
636 
637 	if (get_user(val, (int __user *)optval))
638 		return -EFAULT;
639 
640 	valbool = val ? 1 : 0;
641 
642 	lock_sock(sk);
643 
644 	switch (optname) {
645 	case SO_DEBUG:
646 		if (val && !capable(CAP_NET_ADMIN))
647 			ret = -EACCES;
648 		else
649 			sock_valbool_flag(sk, SOCK_DBG, valbool);
650 		break;
651 	case SO_REUSEADDR:
652 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
653 		break;
654 	case SO_REUSEPORT:
655 		sk->sk_reuseport = valbool;
656 		break;
657 	case SO_TYPE:
658 	case SO_PROTOCOL:
659 	case SO_DOMAIN:
660 	case SO_ERROR:
661 		ret = -ENOPROTOOPT;
662 		break;
663 	case SO_DONTROUTE:
664 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
665 		break;
666 	case SO_BROADCAST:
667 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
668 		break;
669 	case SO_SNDBUF:
670 		/* Don't return an error on this; BSD doesn't, and if you
671 		 * think about it, this is right. Otherwise apps have to
672 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
673 		 * are treated in BSD as hints.
674 		 */
675 		val = min_t(u32, val, sysctl_wmem_max);
676 set_sndbuf:
677 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
678 		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
679 		/* Wake up sending tasks if we upped the value. */
680 		sk->sk_write_space(sk);
681 		break;
682 
683 	case SO_SNDBUFFORCE:
684 		if (!capable(CAP_NET_ADMIN)) {
685 			ret = -EPERM;
686 			break;
687 		}
688 		goto set_sndbuf;
689 
690 	case SO_RCVBUF:
691 		/* Don't return an error on this; BSD doesn't, and if you
692 		 * think about it, this is right. Otherwise apps have to
693 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
694 		 * are treated in BSD as hints.
695 		 */
696 		val = min_t(u32, val, sysctl_rmem_max);
697 set_rcvbuf:
698 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
699 		/*
700 		 * We double it on the way in to account for
701 		 * "struct sk_buff" etc. overhead.   Applications
702 		 * assume that the SO_RCVBUF setting they make will
703 		 * allow that much actual data to be received on that
704 		 * socket.
705 		 *
706 		 * Applications are unaware that "struct sk_buff" and
707 		 * other overheads allocate from the receive buffer
708 		 * during socket buffer allocation.
709 		 *
710 		 * And after considering the possible alternatives,
711 		 * returning the value we actually used in getsockopt
712 		 * is the most desirable behavior.
713 		 */
714 		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
715 		break;
716 
717 	case SO_RCVBUFFORCE:
718 		if (!capable(CAP_NET_ADMIN)) {
719 			ret = -EPERM;
720 			break;
721 		}
722 		goto set_rcvbuf;
723 
724 	case SO_KEEPALIVE:
725 #ifdef CONFIG_INET
726 		if (sk->sk_protocol == IPPROTO_TCP &&
727 		    sk->sk_type == SOCK_STREAM)
728 			tcp_set_keepalive(sk, valbool);
729 #endif
730 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
731 		break;
732 
733 	case SO_OOBINLINE:
734 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
735 		break;
736 
737 	case SO_NO_CHECK:
738 		sk->sk_no_check = valbool;
739 		break;
740 
741 	case SO_PRIORITY:
742 		if ((val >= 0 && val <= 6) ||
743 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
744 			sk->sk_priority = val;
745 		else
746 			ret = -EPERM;
747 		break;
748 
749 	case SO_LINGER:
750 		if (optlen < sizeof(ling)) {
751 			ret = -EINVAL;	/* 1003.1g */
752 			break;
753 		}
754 		if (copy_from_user(&ling, optval, sizeof(ling))) {
755 			ret = -EFAULT;
756 			break;
757 		}
758 		if (!ling.l_onoff)
759 			sock_reset_flag(sk, SOCK_LINGER);
760 		else {
761 #if (BITS_PER_LONG == 32)
762 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
763 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
764 			else
765 #endif
766 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
767 			sock_set_flag(sk, SOCK_LINGER);
768 		}
769 		break;
770 
771 	case SO_BSDCOMPAT:
772 		sock_warn_obsolete_bsdism("setsockopt");
773 		break;
774 
775 	case SO_PASSCRED:
776 		if (valbool)
777 			set_bit(SOCK_PASSCRED, &sock->flags);
778 		else
779 			clear_bit(SOCK_PASSCRED, &sock->flags);
780 		break;
781 
782 	case SO_TIMESTAMP:
783 	case SO_TIMESTAMPNS:
784 		if (valbool)  {
785 			if (optname == SO_TIMESTAMP)
786 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
787 			else
788 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
789 			sock_set_flag(sk, SOCK_RCVTSTAMP);
790 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
791 		} else {
792 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
793 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
794 		}
795 		break;
796 
797 	case SO_TIMESTAMPING:
798 		if (val & ~SOF_TIMESTAMPING_MASK) {
799 			ret = -EINVAL;
800 			break;
801 		}
802 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
803 				  val & SOF_TIMESTAMPING_TX_HARDWARE);
804 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
805 				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
806 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
807 				  val & SOF_TIMESTAMPING_RX_HARDWARE);
808 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
809 			sock_enable_timestamp(sk,
810 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
811 		else
812 			sock_disable_timestamp(sk,
813 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
814 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
815 				  val & SOF_TIMESTAMPING_SOFTWARE);
816 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
817 				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
818 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
819 				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
820 		break;
821 
822 	case SO_RCVLOWAT:
823 		if (val < 0)
824 			val = INT_MAX;
825 		sk->sk_rcvlowat = val ? : 1;
826 		break;
827 
828 	case SO_RCVTIMEO:
829 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
830 		break;
831 
832 	case SO_SNDTIMEO:
833 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
834 		break;
835 
836 	case SO_ATTACH_FILTER:
837 		ret = -EINVAL;
838 		if (optlen == sizeof(struct sock_fprog)) {
839 			struct sock_fprog fprog;
840 
841 			ret = -EFAULT;
842 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
843 				break;
844 
845 			ret = sk_attach_filter(&fprog, sk);
846 		}
847 		break;
848 
849 	case SO_DETACH_FILTER:
850 		ret = sk_detach_filter(sk);
851 		break;
852 
853 	case SO_LOCK_FILTER:
854 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
855 			ret = -EPERM;
856 		else
857 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
858 		break;
859 
860 	case SO_PASSSEC:
861 		if (valbool)
862 			set_bit(SOCK_PASSSEC, &sock->flags);
863 		else
864 			clear_bit(SOCK_PASSSEC, &sock->flags);
865 		break;
866 	case SO_MARK:
867 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
868 			ret = -EPERM;
869 		else
870 			sk->sk_mark = val;
871 		break;
872 
873 		/* We implement SO_SNDLOWAT etc. as not settable,
874 		   per 1003.1g 5.3. */
875 	case SO_RXQ_OVFL:
876 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
877 		break;
878 
879 	case SO_WIFI_STATUS:
880 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
881 		break;
882 
883 	case SO_PEEK_OFF:
884 		if (sock->ops->set_peek_off)
885 			ret = sock->ops->set_peek_off(sk, val);
886 		else
887 			ret = -EOPNOTSUPP;
888 		break;
889 
890 	case SO_NOFCS:
891 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
892 		break;
893 
894 	case SO_SELECT_ERR_QUEUE:
895 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
896 		break;
897 
898 #ifdef CONFIG_NET_RX_BUSY_POLL
899 	case SO_BUSY_POLL:
900 		/* allow unprivileged users to decrease the value */
901 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
902 			ret = -EPERM;
903 		else {
904 			if (val < 0)
905 				ret = -EINVAL;
906 			else
907 				sk->sk_ll_usec = val;
908 		}
909 		break;
910 #endif
911 
912 	case SO_MAX_PACING_RATE:
913 		sk->sk_max_pacing_rate = val;
914 		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
915 					 sk->sk_max_pacing_rate);
916 		break;
917 
918 	default:
919 		ret = -ENOPROTOOPT;
920 		break;
921 	}
922 	release_sock(sk);
923 	return ret;
924 }
925 EXPORT_SYMBOL(sock_setsockopt);
926 
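/*
 * The SO_SNDBUF/SO_RCVBUF doubling documented inside sock_setsockopt()
 * above is visible to applications: the requested value is first clamped
 * to sysctl_wmem_max/sysctl_rmem_max, then doubled (with
 * SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF as a floor), and that doubled value is
 * what getsockopt() reports.  A userspace sketch, error handling omitted:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out is now 131072, assuming 65536 <= sysctl_rmem_max
 */
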
927 
928 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
929 			  struct ucred *ucred)
930 {
931 	ucred->pid = pid_vnr(pid);
932 	ucred->uid = ucred->gid = -1;
933 	if (cred) {
934 		struct user_namespace *current_ns = current_user_ns();
935 
936 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
937 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
938 	}
939 }
940 
941 int sock_getsockopt(struct socket *sock, int level, int optname,
942 		    char __user *optval, int __user *optlen)
943 {
944 	struct sock *sk = sock->sk;
945 
946 	union {
947 		int val;
948 		struct linger ling;
949 		struct timeval tm;
950 	} v;
951 
952 	int lv = sizeof(int);
953 	int len;
954 
955 	if (get_user(len, optlen))
956 		return -EFAULT;
957 	if (len < 0)
958 		return -EINVAL;
959 
960 	memset(&v, 0, sizeof(v));
961 
962 	switch (optname) {
963 	case SO_DEBUG:
964 		v.val = sock_flag(sk, SOCK_DBG);
965 		break;
966 
967 	case SO_DONTROUTE:
968 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
969 		break;
970 
971 	case SO_BROADCAST:
972 		v.val = sock_flag(sk, SOCK_BROADCAST);
973 		break;
974 
975 	case SO_SNDBUF:
976 		v.val = sk->sk_sndbuf;
977 		break;
978 
979 	case SO_RCVBUF:
980 		v.val = sk->sk_rcvbuf;
981 		break;
982 
983 	case SO_REUSEADDR:
984 		v.val = sk->sk_reuse;
985 		break;
986 
987 	case SO_REUSEPORT:
988 		v.val = sk->sk_reuseport;
989 		break;
990 
991 	case SO_KEEPALIVE:
992 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
993 		break;
994 
995 	case SO_TYPE:
996 		v.val = sk->sk_type;
997 		break;
998 
999 	case SO_PROTOCOL:
1000 		v.val = sk->sk_protocol;
1001 		break;
1002 
1003 	case SO_DOMAIN:
1004 		v.val = sk->sk_family;
1005 		break;
1006 
1007 	case SO_ERROR:
1008 		v.val = -sock_error(sk);
1009 		if (v.val == 0)
1010 			v.val = xchg(&sk->sk_err_soft, 0);
1011 		break;
1012 
1013 	case SO_OOBINLINE:
1014 		v.val = sock_flag(sk, SOCK_URGINLINE);
1015 		break;
1016 
1017 	case SO_NO_CHECK:
1018 		v.val = sk->sk_no_check;
1019 		break;
1020 
1021 	case SO_PRIORITY:
1022 		v.val = sk->sk_priority;
1023 		break;
1024 
1025 	case SO_LINGER:
1026 		lv		= sizeof(v.ling);
1027 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1028 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1029 		break;
1030 
1031 	case SO_BSDCOMPAT:
1032 		sock_warn_obsolete_bsdism("getsockopt");
1033 		break;
1034 
1035 	case SO_TIMESTAMP:
1036 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1037 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1038 		break;
1039 
1040 	case SO_TIMESTAMPNS:
1041 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1042 		break;
1043 
1044 	case SO_TIMESTAMPING:
1045 		v.val = 0;
1046 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1047 			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1048 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1049 			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1050 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1051 			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1052 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1053 			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1054 		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1055 			v.val |= SOF_TIMESTAMPING_SOFTWARE;
1056 		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1057 			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1058 		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1059 			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1060 		break;
1061 
1062 	case SO_RCVTIMEO:
1063 		lv = sizeof(struct timeval);
1064 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1065 			v.tm.tv_sec = 0;
1066 			v.tm.tv_usec = 0;
1067 		} else {
1068 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1069 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1070 		}
1071 		break;
1072 
1073 	case SO_SNDTIMEO:
1074 		lv = sizeof(struct timeval);
1075 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1076 			v.tm.tv_sec = 0;
1077 			v.tm.tv_usec = 0;
1078 		} else {
1079 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1080 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1081 		}
1082 		break;
1083 
1084 	case SO_RCVLOWAT:
1085 		v.val = sk->sk_rcvlowat;
1086 		break;
1087 
1088 	case SO_SNDLOWAT:
1089 		v.val = 1;
1090 		break;
1091 
1092 	case SO_PASSCRED:
1093 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1094 		break;
1095 
1096 	case SO_PEERCRED:
1097 	{
1098 		struct ucred peercred;
1099 		if (len > sizeof(peercred))
1100 			len = sizeof(peercred);
1101 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1102 		if (copy_to_user(optval, &peercred, len))
1103 			return -EFAULT;
1104 		goto lenout;
1105 	}
1106 
1107 	case SO_PEERNAME:
1108 	{
1109 		char address[128];
1110 
1111 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1112 			return -ENOTCONN;
1113 		if (lv < len)
1114 			return -EINVAL;
1115 		if (copy_to_user(optval, address, len))
1116 			return -EFAULT;
1117 		goto lenout;
1118 	}
1119 
1120 	/* Dubious BSD thing... Probably nobody even uses it, but
1121 	 * the UNIX standard wants it for whatever reason... -DaveM
1122 	 */
1123 	case SO_ACCEPTCONN:
1124 		v.val = sk->sk_state == TCP_LISTEN;
1125 		break;
1126 
1127 	case SO_PASSSEC:
1128 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1129 		break;
1130 
1131 	case SO_PEERSEC:
1132 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1133 
1134 	case SO_MARK:
1135 		v.val = sk->sk_mark;
1136 		break;
1137 
1138 	case SO_RXQ_OVFL:
1139 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1140 		break;
1141 
1142 	case SO_WIFI_STATUS:
1143 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1144 		break;
1145 
1146 	case SO_PEEK_OFF:
1147 		if (!sock->ops->set_peek_off)
1148 			return -EOPNOTSUPP;
1149 
1150 		v.val = sk->sk_peek_off;
1151 		break;
1152 	case SO_NOFCS:
1153 		v.val = sock_flag(sk, SOCK_NOFCS);
1154 		break;
1155 
1156 	case SO_BINDTODEVICE:
1157 		return sock_getbindtodevice(sk, optval, optlen, len);
1158 
1159 	case SO_GET_FILTER:
1160 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1161 		if (len < 0)
1162 			return len;
1163 
1164 		goto lenout;
1165 
1166 	case SO_LOCK_FILTER:
1167 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1168 		break;
1169 
1170 	case SO_BPF_EXTENSIONS:
1171 		v.val = bpf_tell_extensions();
1172 		break;
1173 
1174 	case SO_SELECT_ERR_QUEUE:
1175 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1176 		break;
1177 
1178 #ifdef CONFIG_NET_RX_BUSY_POLL
1179 	case SO_BUSY_POLL:
1180 		v.val = sk->sk_ll_usec;
1181 		break;
1182 #endif
1183 
1184 	case SO_MAX_PACING_RATE:
1185 		v.val = sk->sk_max_pacing_rate;
1186 		break;
1187 
1188 	default:
1189 		return -ENOPROTOOPT;
1190 	}
1191 
1192 	if (len > lv)
1193 		len = lv;
1194 	if (copy_to_user(optval, &v, len))
1195 		return -EFAULT;
1196 lenout:
1197 	if (put_user(len, optlen))
1198 		return -EFAULT;
1199 	return 0;
1200 }
1201 
1202 /*
1203  * Initialize an sk_lock.
1204  *
1205  * (We also register the sk_lock with the lock validator.)
1206  */
1207 static inline void sock_lock_init(struct sock *sk)
1208 {
1209 	sock_lock_init_class_and_name(sk,
1210 			af_family_slock_key_strings[sk->sk_family],
1211 			af_family_slock_keys + sk->sk_family,
1212 			af_family_key_strings[sk->sk_family],
1213 			af_family_keys + sk->sk_family);
1214 }
1215 
1216 /*
1217  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1218  * even temporarily, because of RCU lookups. sk_node should also be left as is.
1219  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1220  */
1221 static void sock_copy(struct sock *nsk, const struct sock *osk)
1222 {
1223 #ifdef CONFIG_SECURITY_NETWORK
1224 	void *sptr = nsk->sk_security;
1225 #endif
1226 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1227 
1228 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1229 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1230 
1231 #ifdef CONFIG_SECURITY_NETWORK
1232 	nsk->sk_security = sptr;
1233 	security_sk_clone(osk, nsk);
1234 #endif
1235 }
1236 
1237 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1238 {
1239 	unsigned long nulls1, nulls2;
1240 
1241 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1242 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1243 	if (nulls1 > nulls2)
1244 		swap(nulls1, nulls2);
1245 
1246 	if (nulls1 != 0)
1247 		memset((char *)sk, 0, nulls1);
1248 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1249 	       nulls2 - nulls1 - sizeof(void *));
1250 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1251 	       size - nulls2 - sizeof(void *));
1252 }
1253 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1254 
1255 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1256 		int family)
1257 {
1258 	struct sock *sk;
1259 	struct kmem_cache *slab;
1260 
1261 	slab = prot->slab;
1262 	if (slab != NULL) {
1263 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1264 		if (!sk)
1265 			return sk;
1266 		if (priority & __GFP_ZERO) {
1267 			if (prot->clear_sk)
1268 				prot->clear_sk(sk, prot->obj_size);
1269 			else
1270 				sk_prot_clear_nulls(sk, prot->obj_size);
1271 		}
1272 	} else
1273 		sk = kmalloc(prot->obj_size, priority);
1274 
1275 	if (sk != NULL) {
1276 		kmemcheck_annotate_bitfield(sk, flags);
1277 
1278 		if (security_sk_alloc(sk, family, priority))
1279 			goto out_free;
1280 
1281 		if (!try_module_get(prot->owner))
1282 			goto out_free_sec;
1283 		sk_tx_queue_clear(sk);
1284 	}
1285 
1286 	return sk;
1287 
1288 out_free_sec:
1289 	security_sk_free(sk);
1290 out_free:
1291 	if (slab != NULL)
1292 		kmem_cache_free(slab, sk);
1293 	else
1294 		kfree(sk);
1295 	return NULL;
1296 }
1297 
1298 static void sk_prot_free(struct proto *prot, struct sock *sk)
1299 {
1300 	struct kmem_cache *slab;
1301 	struct module *owner;
1302 
1303 	owner = prot->owner;
1304 	slab = prot->slab;
1305 
1306 	security_sk_free(sk);
1307 	if (slab != NULL)
1308 		kmem_cache_free(slab, sk);
1309 	else
1310 		kfree(sk);
1311 	module_put(owner);
1312 }
1313 
1314 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1315 void sock_update_netprioidx(struct sock *sk)
1316 {
1317 	if (in_interrupt())
1318 		return;
1319 
1320 	sk->sk_cgrp_prioidx = task_netprioidx(current);
1321 }
1322 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1323 #endif
1324 
1325 /**
1326  *	sk_alloc - All socket objects are allocated here
1327  *	@net: the applicable net namespace
1328  *	@family: protocol family
1329  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1330  *	@prot: struct proto associated with this new sock instance
1331  */
1332 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1333 		      struct proto *prot)
1334 {
1335 	struct sock *sk;
1336 
1337 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1338 	if (sk) {
1339 		sk->sk_family = family;
1340 		/*
1341 		 * See comment in struct sock definition to understand
1342 		 * why we need sk_prot_creator -acme
1343 		 */
1344 		sk->sk_prot = sk->sk_prot_creator = prot;
1345 		sock_lock_init(sk);
1346 		sock_net_set(sk, get_net(net));
1347 		atomic_set(&sk->sk_wmem_alloc, 1);
1348 
1349 		sock_update_classid(sk);
1350 		sock_update_netprioidx(sk);
1351 	}
1352 
1353 	return sk;
1354 }
1355 EXPORT_SYMBOL(sk_alloc);
1356 
1357 static void __sk_free(struct sock *sk)
1358 {
1359 	struct sk_filter *filter;
1360 
1361 	if (sk->sk_destruct)
1362 		sk->sk_destruct(sk);
1363 
1364 	filter = rcu_dereference_check(sk->sk_filter,
1365 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1366 	if (filter) {
1367 		sk_filter_uncharge(sk, filter);
1368 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1369 	}
1370 
1371 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1372 
1373 	if (atomic_read(&sk->sk_omem_alloc))
1374 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1375 			 __func__, atomic_read(&sk->sk_omem_alloc));
1376 
1377 	if (sk->sk_peer_cred)
1378 		put_cred(sk->sk_peer_cred);
1379 	put_pid(sk->sk_peer_pid);
1380 	put_net(sock_net(sk));
1381 	sk_prot_free(sk->sk_prot_creator, sk);
1382 }
1383 
1384 void sk_free(struct sock *sk)
1385 {
1386 	/*
1387 	 * We subtract one from sk_wmem_alloc and can know if
1388 	 * some packets are still in some tx queue.
1389 	 * If not null, sock_wfree() will call __sk_free(sk) later
1390 	 */
1391 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1392 		__sk_free(sk);
1393 }
1394 EXPORT_SYMBOL(sk_free);
1395 
1396 /*
1397  * The last sock_put should drop the reference to sk->sk_net. It has
1398  * already been dropped in sk_change_net. Taking a reference to the
1399  * stopping namespace is not an option.
1400  * Instead, take a reference to the socket, remove it from the hashes
1401  * while still _alive_, and then destroy it in the context of init_net.
1402  */
1403 void sk_release_kernel(struct sock *sk)
1404 {
1405 	if (sk == NULL || sk->sk_socket == NULL)
1406 		return;
1407 
1408 	sock_hold(sk);
1409 	sock_release(sk->sk_socket);
1410 	release_net(sock_net(sk));
1411 	sock_net_set(sk, get_net(&init_net));
1412 	sock_put(sk);
1413 }
1414 EXPORT_SYMBOL(sk_release_kernel);
1415 
1416 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1417 {
1418 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1419 		sock_update_memcg(newsk);
1420 }
1421 
1422 /**
1423  *	sk_clone_lock - clone a socket, and lock its clone
1424  *	@sk: the socket to clone
1425  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1426  *
1427  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1428  */
1429 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1430 {
1431 	struct sock *newsk;
1432 
1433 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1434 	if (newsk != NULL) {
1435 		struct sk_filter *filter;
1436 
1437 		sock_copy(newsk, sk);
1438 
1439 		/* SANITY */
1440 		get_net(sock_net(newsk));
1441 		sk_node_init(&newsk->sk_node);
1442 		sock_lock_init(newsk);
1443 		bh_lock_sock(newsk);
1444 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1445 		newsk->sk_backlog.len = 0;
1446 
1447 		atomic_set(&newsk->sk_rmem_alloc, 0);
1448 		/*
1449 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1450 		 */
1451 		atomic_set(&newsk->sk_wmem_alloc, 1);
1452 		atomic_set(&newsk->sk_omem_alloc, 0);
1453 		skb_queue_head_init(&newsk->sk_receive_queue);
1454 		skb_queue_head_init(&newsk->sk_write_queue);
1455 #ifdef CONFIG_NET_DMA
1456 		skb_queue_head_init(&newsk->sk_async_wait_queue);
1457 #endif
1458 
1459 		spin_lock_init(&newsk->sk_dst_lock);
1460 		rwlock_init(&newsk->sk_callback_lock);
1461 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1462 				af_callback_keys + newsk->sk_family,
1463 				af_family_clock_key_strings[newsk->sk_family]);
1464 
1465 		newsk->sk_dst_cache	= NULL;
1466 		newsk->sk_wmem_queued	= 0;
1467 		newsk->sk_forward_alloc = 0;
1468 		newsk->sk_send_head	= NULL;
1469 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1470 
1471 		sock_reset_flag(newsk, SOCK_DONE);
1472 		skb_queue_head_init(&newsk->sk_error_queue);
1473 
1474 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1475 		if (filter != NULL)
1476 			sk_filter_charge(newsk, filter);
1477 
1478 		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1479 			/* It is still a raw copy of the parent, so invalidate
1480 			 * the destructor and do a plain sk_free(). */
1481 			newsk->sk_destruct = NULL;
1482 			bh_unlock_sock(newsk);
1483 			sk_free(newsk);
1484 			newsk = NULL;
1485 			goto out;
1486 		}
1487 
1488 		newsk->sk_err	   = 0;
1489 		newsk->sk_priority = 0;
1490 		/*
1491 		 * Before updating sk_refcnt, we must commit prior changes to memory
1492 		 * (Documentation/RCU/rculist_nulls.txt for details)
1493 		 */
1494 		smp_wmb();
1495 		atomic_set(&newsk->sk_refcnt, 2);
1496 
1497 		/*
1498 		 * Increment the counter in the same struct proto as the master
1499 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1500 		 * is the same as sk->sk_prot->socks, as this field was copied
1501 		 * with memcpy).
1502 		 *
1503 		 * This _changes_ the previous behaviour, where
1504 		 * tcp_create_openreq_child always was incrementing the
1505 		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1506 		 * to be taken into account in all callers. -acme
1507 		 */
1508 		sk_refcnt_debug_inc(newsk);
1509 		sk_set_socket(newsk, NULL);
1510 		newsk->sk_wq = NULL;
1511 
1512 		sk_update_clone(sk, newsk);
1513 
1514 		if (newsk->sk_prot->sockets_allocated)
1515 			sk_sockets_allocated_inc(newsk);
1516 
1517 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1518 			net_enable_timestamp();
1519 	}
1520 out:
1521 	return newsk;
1522 }
1523 EXPORT_SYMBOL_GPL(sk_clone_lock);
1524 
1525 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1526 {
1527 	__sk_dst_set(sk, dst);
1528 	sk->sk_route_caps = dst->dev->features;
1529 	if (sk->sk_route_caps & NETIF_F_GSO)
1530 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1531 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1532 	if (sk_can_gso(sk)) {
1533 		if (dst->header_len) {
1534 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1535 		} else {
1536 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1537 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1538 			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1539 		}
1540 	}
1541 }
1542 EXPORT_SYMBOL_GPL(sk_setup_caps);
1543 
1544 /*
1545  *	Simple resource managers for sockets.
1546  */
1547 
1548 
1549 /*
1550  * Write buffer destructor automatically called from kfree_skb.
1551  */
1552 void sock_wfree(struct sk_buff *skb)
1553 {
1554 	struct sock *sk = skb->sk;
1555 	unsigned int len = skb->truesize;
1556 
1557 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1558 		/*
1559 		 * Keep a reference on sk_wmem_alloc; it will be released
1560 		 * after the sk_write_space() call.
1561 		 */
1562 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1563 		sk->sk_write_space(sk);
1564 		len = 1;
1565 	}
1566 	/*
1567 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1568 	 * could not do because of in-flight packets
1569 	 */
1570 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1571 		__sk_free(sk);
1572 }
1573 EXPORT_SYMBOL(sock_wfree);
1574 
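/*
 * For context, a sketch of the pairing: sock_wfree() is the destructor
 * installed by skb_set_owner_w(), which charges skb->truesize to
 * sk_wmem_alloc when a buffer is handed to the socket:
 *
 *	skb_set_owner_w(skb, sk);	// skb->destructor = sock_wfree
 *	...
 *	kfree_skb(skb);			// runs sock_wfree(), uncharges the
 *					// bytes and wakes up writers
 *
 * The matching read-side pair is skb_set_owner_r()/sock_rfree() below.
 */
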
1575 void skb_orphan_partial(struct sk_buff *skb)
1576 {
1577 	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1578 	 * so we do not completely orphan the skb, but transfer all
1579 	 * accounted bytes but one, to avoid unexpected reorders.
1580 	 */
1581 	if (skb->destructor == sock_wfree
1582 #ifdef CONFIG_INET
1583 	    || skb->destructor == tcp_wfree
1584 #endif
1585 		) {
1586 		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1587 		skb->truesize = 1;
1588 	} else {
1589 		skb_orphan(skb);
1590 	}
1591 }
1592 EXPORT_SYMBOL(skb_orphan_partial);
1593 
1594 /*
1595  * Read buffer destructor automatically called from kfree_skb.
1596  */
1597 void sock_rfree(struct sk_buff *skb)
1598 {
1599 	struct sock *sk = skb->sk;
1600 	unsigned int len = skb->truesize;
1601 
1602 	atomic_sub(len, &sk->sk_rmem_alloc);
1603 	sk_mem_uncharge(sk, len);
1604 }
1605 EXPORT_SYMBOL(sock_rfree);
1606 
1607 void sock_edemux(struct sk_buff *skb)
1608 {
1609 	struct sock *sk = skb->sk;
1610 
1611 #ifdef CONFIG_INET
1612 	if (sk->sk_state == TCP_TIME_WAIT)
1613 		inet_twsk_put(inet_twsk(sk));
1614 	else
1615 #endif
1616 		sock_put(sk);
1617 }
1618 EXPORT_SYMBOL(sock_edemux);
1619 
1620 kuid_t sock_i_uid(struct sock *sk)
1621 {
1622 	kuid_t uid;
1623 
1624 	read_lock_bh(&sk->sk_callback_lock);
1625 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1626 	read_unlock_bh(&sk->sk_callback_lock);
1627 	return uid;
1628 }
1629 EXPORT_SYMBOL(sock_i_uid);
1630 
1631 unsigned long sock_i_ino(struct sock *sk)
1632 {
1633 	unsigned long ino;
1634 
1635 	read_lock_bh(&sk->sk_callback_lock);
1636 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1637 	read_unlock_bh(&sk->sk_callback_lock);
1638 	return ino;
1639 }
1640 EXPORT_SYMBOL(sock_i_ino);
1641 
1642 /*
1643  * Allocate a skb from the socket's send buffer.
1644  */
1645 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1646 			     gfp_t priority)
1647 {
1648 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1649 		struct sk_buff *skb = alloc_skb(size, priority);
1650 		if (skb) {
1651 			skb_set_owner_w(skb, sk);
1652 			return skb;
1653 		}
1654 	}
1655 	return NULL;
1656 }
1657 EXPORT_SYMBOL(sock_wmalloc);
1658 
1659 /*
1660  * Allocate a memory block from the socket's option memory buffer.
1661  */
1662 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1663 {
1664 	if ((unsigned int)size <= sysctl_optmem_max &&
1665 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1666 		void *mem;
1667 		/* First do the add, to avoid the race if kmalloc
1668 		 * might sleep.
1669 		 */
1670 		atomic_add(size, &sk->sk_omem_alloc);
1671 		mem = kmalloc(size, priority);
1672 		if (mem)
1673 			return mem;
1674 		atomic_sub(size, &sk->sk_omem_alloc);
1675 	}
1676 	return NULL;
1677 }
1678 EXPORT_SYMBOL(sock_kmalloc);
1679 
1680 /*
1681  * Free an option memory block.
1682  */
1683 void sock_kfree_s(struct sock *sk, void *mem, int size)
1684 {
1685 	kfree(mem);
1686 	atomic_sub(size, &sk->sk_omem_alloc);
1687 }
1688 EXPORT_SYMBOL(sock_kfree_s);
1689 
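/*
 * Usage sketch (hypothetical caller): per-socket option data allocated
 * with sock_kmalloc() must be returned with sock_kfree_s() using the
 * same size, so that sk_omem_alloc stays balanced:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 *
 * The sysctl_optmem_max checks above are what bound how much such
 * ancillary/option memory a single socket may pin.
 */
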
1690 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1691    I think these locks should be removed for datagram sockets.
1692  */
1693 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1694 {
1695 	DEFINE_WAIT(wait);
1696 
1697 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1698 	for (;;) {
1699 		if (!timeo)
1700 			break;
1701 		if (signal_pending(current))
1702 			break;
1703 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1704 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1705 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1706 			break;
1707 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1708 			break;
1709 		if (sk->sk_err)
1710 			break;
1711 		timeo = schedule_timeout(timeo);
1712 	}
1713 	finish_wait(sk_sleep(sk), &wait);
1714 	return timeo;
1715 }
1716 
1717 
1718 /*
1719  *	Generic send/receive buffer handlers
1720  */
1721 
1722 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1723 				     unsigned long data_len, int noblock,
1724 				     int *errcode, int max_page_order)
1725 {
1726 	struct sk_buff *skb = NULL;
1727 	unsigned long chunk;
1728 	gfp_t gfp_mask;
1729 	long timeo;
1730 	int err;
1731 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1732 	struct page *page;
1733 	int i;
1734 
1735 	err = -EMSGSIZE;
1736 	if (npages > MAX_SKB_FRAGS)
1737 		goto failure;
1738 
1739 	timeo = sock_sndtimeo(sk, noblock);
1740 	while (!skb) {
1741 		err = sock_error(sk);
1742 		if (err != 0)
1743 			goto failure;
1744 
1745 		err = -EPIPE;
1746 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1747 			goto failure;
1748 
1749 		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
1750 			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1751 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1752 			err = -EAGAIN;
1753 			if (!timeo)
1754 				goto failure;
1755 			if (signal_pending(current))
1756 				goto interrupted;
1757 			timeo = sock_wait_for_wmem(sk, timeo);
1758 			continue;
1759 		}
1760 
1761 		err = -ENOBUFS;
1762 		gfp_mask = sk->sk_allocation;
1763 		if (gfp_mask & __GFP_WAIT)
1764 			gfp_mask |= __GFP_REPEAT;
1765 
1766 		skb = alloc_skb(header_len, gfp_mask);
1767 		if (!skb)
1768 			goto failure;
1769 
1770 		skb->truesize += data_len;
1771 
1772 		for (i = 0; npages > 0; i++) {
1773 			int order = max_page_order;
1774 
1775 			while (order) {
1776 				if (npages >= 1 << order) {
1777 					page = alloc_pages(sk->sk_allocation |
1778 							   __GFP_COMP | __GFP_NOWARN,
1779 							   order);
1780 					if (page)
1781 						goto fill_page;
1782 				}
1783 				order--;
1784 			}
1785 			page = alloc_page(sk->sk_allocation);
1786 			if (!page)
1787 				goto failure;
1788 fill_page:
1789 			chunk = min_t(unsigned long, data_len,
1790 				      PAGE_SIZE << order);
1791 			skb_fill_page_desc(skb, i, page, 0, chunk);
1792 			data_len -= chunk;
1793 			npages -= 1 << order;
1794 		}
1795 	}
1796 
1797 	skb_set_owner_w(skb, sk);
1798 	return skb;
1799 
1800 interrupted:
1801 	err = sock_intr_errno(timeo);
1802 failure:
1803 	kfree_skb(skb);
1804 	*errcode = err;
1805 	return NULL;
1806 }
1807 EXPORT_SYMBOL(sock_alloc_send_pskb);
1808 
1809 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1810 				    int noblock, int *errcode)
1811 {
1812 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1813 }
1814 EXPORT_SYMBOL(sock_alloc_send_skb);
1815 
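/*
 * Usage sketch (hypothetical datagram sendmsg path, names illustrative):
 * blocking behaviour and error handling are driven entirely by the
 * caller's noblock flag and the returned errcode:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	// err is -EAGAIN, -EPIPE, a signal error, ...
 *
 * The skb comes back already charged to the socket via
 * skb_set_owner_w(), so it counts against sk_sndbuf until it is freed.
 */
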
1816 /* On 32bit arches, an skb frag is limited to 2^15 */
1817 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1818 
1819 /**
1820  * skb_page_frag_refill - check that a page_frag contains enough room
1821  * @sz: minimum size of the fragment we want to get
1822  * @pfrag: pointer to page_frag
1823  * @prio: priority for memory allocation
1824  *
1825  * Note: While this allocator tries to use high order pages, there is
1826  * no guarantee that allocations succeed. Therefore, @sz MUST be
1827  * less than or equal to PAGE_SIZE.
1828  */
1829 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1830 {
1831 	int order;
1832 
1833 	if (pfrag->page) {
1834 		if (atomic_read(&pfrag->page->_count) == 1) {
1835 			pfrag->offset = 0;
1836 			return true;
1837 		}
1838 		if (pfrag->offset + sz <= pfrag->size)
1839 			return true;
1840 		put_page(pfrag->page);
1841 	}
1842 
1843 	order = SKB_FRAG_PAGE_ORDER;
1844 	do {
1845 		gfp_t gfp = prio;
1846 
1847 		if (order)
1848 			gfp |= __GFP_COMP | __GFP_NOWARN;
1849 		pfrag->page = alloc_pages(gfp, order);
1850 		if (likely(pfrag->page)) {
1851 			pfrag->offset = 0;
1852 			pfrag->size = PAGE_SIZE << order;
1853 			return true;
1854 		}
1855 	} while (--order >= 0);
1856 
1857 	return false;
1858 }
1859 EXPORT_SYMBOL(skb_page_frag_refill);
1860 
1861 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1862 {
1863 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1864 		return true;
1865 
1866 	sk_enter_memory_pressure(sk);
1867 	sk_stream_moderate_sndbuf(sk);
1868 	return false;
1869 }
1870 EXPORT_SYMBOL(sk_page_frag_refill);
1871 
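/*
 * Usage sketch (simplified from how a stream sendmsg path would use it;
 * sk_page_frag() comes from net/sock.h): the per-socket page_frag is
 * refilled on demand and consumed by bumping the offset once data has
 * been copied in:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	// ... copy 'copy' bytes into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 *
 * On failure sk_page_frag_refill() has already flagged memory pressure
 * and moderated the send buffer, so callers typically just wait.
 */
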
1872 static void __lock_sock(struct sock *sk)
1873 	__releases(&sk->sk_lock.slock)
1874 	__acquires(&sk->sk_lock.slock)
1875 {
1876 	DEFINE_WAIT(wait);
1877 
1878 	for (;;) {
1879 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1880 					TASK_UNINTERRUPTIBLE);
1881 		spin_unlock_bh(&sk->sk_lock.slock);
1882 		schedule();
1883 		spin_lock_bh(&sk->sk_lock.slock);
1884 		if (!sock_owned_by_user(sk))
1885 			break;
1886 	}
1887 	finish_wait(&sk->sk_lock.wq, &wait);
1888 }
1889 
1890 static void __release_sock(struct sock *sk)
1891 	__releases(&sk->sk_lock.slock)
1892 	__acquires(&sk->sk_lock.slock)
1893 {
1894 	struct sk_buff *skb = sk->sk_backlog.head;
1895 
1896 	do {
1897 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1898 		bh_unlock_sock(sk);
1899 
1900 		do {
1901 			struct sk_buff *next = skb->next;
1902 
1903 			prefetch(next);
1904 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1905 			skb->next = NULL;
1906 			sk_backlog_rcv(sk, skb);
1907 
1908 			/*
1909 			 * We are in process context here with softirqs
1910 			 * disabled, use cond_resched_softirq() to preempt.
1911 			 * This is safe to do because we've taken the backlog
1912 			 * queue private:
1913 			 */
1914 			cond_resched_softirq();
1915 
1916 			skb = next;
1917 		} while (skb != NULL);
1918 
1919 		bh_lock_sock(sk);
1920 	} while ((skb = sk->sk_backlog.head) != NULL);
1921 
1922 	/*
1923 	 * Doing the zeroing here guarantees we cannot loop forever
1924 	 * while a wild producer attempts to flood us.
1925 	 */
1926 	sk->sk_backlog.len = 0;
1927 }
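
/*
 * For reference, the producer side of the backlog drained above (a
 * protocol's softirq receive handler) looks roughly like the sketch
 * below; my_proto_do_rcv() is a hypothetical per-protocol handler and
 * error handling is elided.
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = my_proto_do_rcv(sk, skb);		// process directly
 *	else if (sk_add_backlog(sk, skb,
 *				sk->sk_rcvbuf + sk->sk_sndbuf))
 *		kfree_skb(skb);				// backlog limit hit
 *	bh_unlock_sock(sk);
 */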
1928 
1929 /**
1930  * sk_wait_data - wait for data to arrive at sk_receive_queue
1931  * @sk:    sock to wait on
1932  * @timeo: for how long
1933  *
1934  * Now the socket state, including sk->sk_err, is changed only under the lock,
1935  * hence we may omit checks after joining the wait queue.
1936  * We check the receive queue before schedule() only as an optimization;
1937  * it is very likely that release_sock() added new data.
1938  */
1939 int sk_wait_data(struct sock *sk, long *timeo)
1940 {
1941 	int rc;
1942 	DEFINE_WAIT(wait);
1943 
1944 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1945 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1946 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1947 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1948 	finish_wait(sk_sleep(sk), &wait);
1949 	return rc;
1950 }
1951 EXPORT_SYMBOL(sk_wait_data);
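
/*
 * A minimal usage sketch (hypothetical recvmsg() fragment): callers hold
 * the socket lock, loop until data shows up or the timeout or a signal
 * ends the wait, and translate those conditions into errors.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		if (signal_pending(current)) {
 *			err = sock_intr_errno(timeo);
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo);	// releases and retakes the lock
 *	}
 *	release_sock(sk);
 */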
1952 
1953 /**
1954  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1955  *	@sk: socket
1956  *	@size: memory size to allocate
1957  *	@kind: allocation type
1958  *
1959  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1960  *	rmem allocation. This function assumes that protocols which have
1961  *	memory_pressure use sk_wmem_queued for write buffer accounting.
1962  */
1963 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1964 {
1965 	struct proto *prot = sk->sk_prot;
1966 	int amt = sk_mem_pages(size);
1967 	long allocated;
1968 	int parent_status = UNDER_LIMIT;
1969 
1970 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1971 
1972 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1973 
1974 	/* Under limit. */
1975 	if (parent_status == UNDER_LIMIT &&
1976 			allocated <= sk_prot_mem_limits(sk, 0)) {
1977 		sk_leave_memory_pressure(sk);
1978 		return 1;
1979 	}
1980 
1981 	/* Under pressure. (we or our parents) */
1982 	if ((parent_status > SOFT_LIMIT) ||
1983 			allocated > sk_prot_mem_limits(sk, 1))
1984 		sk_enter_memory_pressure(sk);
1985 
1986 	/* Over hard limit (we or our parents) */
1987 	if ((parent_status == OVER_LIMIT) ||
1988 			(allocated > sk_prot_mem_limits(sk, 2)))
1989 		goto suppress_allocation;
1990 
1991 	/* guarantee minimum buffer size under pressure */
1992 	if (kind == SK_MEM_RECV) {
1993 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1994 			return 1;
1995 
1996 	} else { /* SK_MEM_SEND */
1997 		if (sk->sk_type == SOCK_STREAM) {
1998 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1999 				return 1;
2000 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2001 			   prot->sysctl_wmem[0])
2002 				return 1;
2003 	}
2004 
2005 	if (sk_has_memory_pressure(sk)) {
2006 		int alloc;
2007 
2008 		if (!sk_under_memory_pressure(sk))
2009 			return 1;
2010 		alloc = sk_sockets_allocated_read_positive(sk);
2011 		if (sk_prot_mem_limits(sk, 2) > alloc *
2012 		    sk_mem_pages(sk->sk_wmem_queued +
2013 				 atomic_read(&sk->sk_rmem_alloc) +
2014 				 sk->sk_forward_alloc))
2015 			return 1;
2016 	}
2017 
2018 suppress_allocation:
2019 
2020 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2021 		sk_stream_moderate_sndbuf(sk);
2022 
2023 		/* Fail only if the socket is _under_ its sndbuf.
2024 		 * In this case we cannot block, so we have to fail.
2025 		 */
2026 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2027 			return 1;
2028 	}
2029 
2030 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2031 
2032 	/* Alas. Undo changes. */
2033 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2034 
2035 	sk_memory_allocated_sub(sk, amt);
2036 
2037 	return 0;
2038 }
2039 EXPORT_SYMBOL(__sk_mem_schedule);
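
/*
 * Protocols normally reach __sk_mem_schedule() through the
 * sk_wmem_schedule()/sk_rmem_schedule() helpers in <net/sock.h>, which
 * first try to satisfy the request from sk_forward_alloc.  A rough
 * sketch of a receive path charging an skb (error handling elided):
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 *		atomic_inc(&sk->sk_drops);	// accounting refused the skb
 *		kfree_skb(skb);
 *		return -ENOMEM;
 *	}
 *	skb_set_owner_r(skb, sk);
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 */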
2040 
2041 /**
2042  *	__sk_mem_reclaim - reclaim memory_allocated
2043  *	@sk: socket
2044  */
2045 void __sk_mem_reclaim(struct sock *sk)
2046 {
2047 	sk_memory_allocated_sub(sk,
2048 				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2049 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2050 
2051 	if (sk_under_memory_pressure(sk) &&
2052 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2053 		sk_leave_memory_pressure(sk);
2054 }
2055 EXPORT_SYMBOL(__sk_mem_reclaim);
2056 
2057 
2058 /*
2059  * Set of default routines for initialising struct proto_ops when
2060  * the protocol does not support a particular function. In certain
2061  * cases where it makes no sense for a protocol to have a "do nothing"
2062  * function, some default processing is provided.
2063  */
2064 
2065 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2066 {
2067 	return -EOPNOTSUPP;
2068 }
2069 EXPORT_SYMBOL(sock_no_bind);
2070 
2071 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2072 		    int len, int flags)
2073 {
2074 	return -EOPNOTSUPP;
2075 }
2076 EXPORT_SYMBOL(sock_no_connect);
2077 
2078 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2079 {
2080 	return -EOPNOTSUPP;
2081 }
2082 EXPORT_SYMBOL(sock_no_socketpair);
2083 
2084 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2085 {
2086 	return -EOPNOTSUPP;
2087 }
2088 EXPORT_SYMBOL(sock_no_accept);
2089 
2090 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2091 		    int *len, int peer)
2092 {
2093 	return -EOPNOTSUPP;
2094 }
2095 EXPORT_SYMBOL(sock_no_getname);
2096 
2097 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2098 {
2099 	return 0;
2100 }
2101 EXPORT_SYMBOL(sock_no_poll);
2102 
2103 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2104 {
2105 	return -EOPNOTSUPP;
2106 }
2107 EXPORT_SYMBOL(sock_no_ioctl);
2108 
2109 int sock_no_listen(struct socket *sock, int backlog)
2110 {
2111 	return -EOPNOTSUPP;
2112 }
2113 EXPORT_SYMBOL(sock_no_listen);
2114 
2115 int sock_no_shutdown(struct socket *sock, int how)
2116 {
2117 	return -EOPNOTSUPP;
2118 }
2119 EXPORT_SYMBOL(sock_no_shutdown);
2120 
2121 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2122 		    char __user *optval, unsigned int optlen)
2123 {
2124 	return -EOPNOTSUPP;
2125 }
2126 EXPORT_SYMBOL(sock_no_setsockopt);
2127 
2128 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2129 		    char __user *optval, int __user *optlen)
2130 {
2131 	return -EOPNOTSUPP;
2132 }
2133 EXPORT_SYMBOL(sock_no_getsockopt);
2134 
2135 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2136 		    size_t len)
2137 {
2138 	return -EOPNOTSUPP;
2139 }
2140 EXPORT_SYMBOL(sock_no_sendmsg);
2141 
2142 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2143 		    size_t len, int flags)
2144 {
2145 	return -EOPNOTSUPP;
2146 }
2147 EXPORT_SYMBOL(sock_no_recvmsg);
2148 
2149 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2150 {
2151 	/* Mirror missing mmap method error code */
2152 	return -ENODEV;
2153 }
2154 EXPORT_SYMBOL(sock_no_mmap);
2155 
2156 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2157 {
2158 	ssize_t res;
2159 	struct msghdr msg = {.msg_flags = flags};
2160 	struct kvec iov;
2161 	char *kaddr = kmap(page);
2162 	iov.iov_base = kaddr + offset;
2163 	iov.iov_len = size;
2164 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2165 	kunmap(page);
2166 	return res;
2167 }
2168 EXPORT_SYMBOL(sock_no_sendpage);
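
/*
 * A protocol family that lacks some operations can plug the stubs above
 * directly into its struct proto_ops.  A hedged sketch for a
 * hypothetical datagram family (PF_MYPROTO, my_release, my_bind and the
 * omitted fields are placeholders):
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.release	= my_release,
 *		.bind		= my_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *		...
 *	};
 */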
2169 
2170 /*
2171  *	Default Socket Callbacks
2172  */
2173 
2174 static void sock_def_wakeup(struct sock *sk)
2175 {
2176 	struct socket_wq *wq;
2177 
2178 	rcu_read_lock();
2179 	wq = rcu_dereference(sk->sk_wq);
2180 	if (wq_has_sleeper(wq))
2181 		wake_up_interruptible_all(&wq->wait);
2182 	rcu_read_unlock();
2183 }
2184 
2185 static void sock_def_error_report(struct sock *sk)
2186 {
2187 	struct socket_wq *wq;
2188 
2189 	rcu_read_lock();
2190 	wq = rcu_dereference(sk->sk_wq);
2191 	if (wq_has_sleeper(wq))
2192 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2193 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2194 	rcu_read_unlock();
2195 }
2196 
2197 static void sock_def_readable(struct sock *sk, int len)
2198 {
2199 	struct socket_wq *wq;
2200 
2201 	rcu_read_lock();
2202 	wq = rcu_dereference(sk->sk_wq);
2203 	if (wq_has_sleeper(wq))
2204 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2205 						POLLRDNORM | POLLRDBAND);
2206 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2207 	rcu_read_unlock();
2208 }
2209 
2210 static void sock_def_write_space(struct sock *sk)
2211 {
2212 	struct socket_wq *wq;
2213 
2214 	rcu_read_lock();
2215 
2216 	/* Do not wake up a writer until he can make "significant"
2217 	 * progress.  --DaveM
2218 	 */
2219 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2220 		wq = rcu_dereference(sk->sk_wq);
2221 		if (wq_has_sleeper(wq))
2222 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2223 						POLLWRNORM | POLLWRBAND);
2224 
2225 		/* Should agree with poll, otherwise some programs break */
2226 		if (sock_writeable(sk))
2227 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2228 	}
2229 
2230 	rcu_read_unlock();
2231 }
2232 
2233 static void sock_def_destruct(struct sock *sk)
2234 {
2235 	kfree(sk->sk_protinfo);
2236 }
2237 
2238 void sk_send_sigurg(struct sock *sk)
2239 {
2240 	if (sk->sk_socket && sk->sk_socket->file)
2241 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2242 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2243 }
2244 EXPORT_SYMBOL(sk_send_sigurg);
2245 
2246 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2247 		    unsigned long expires)
2248 {
2249 	if (!mod_timer(timer, expires))
2250 		sock_hold(sk);
2251 }
2252 EXPORT_SYMBOL(sk_reset_timer);
2253 
2254 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2255 {
2256 	if (del_timer(timer))
2257 		__sock_put(sk);
2258 }
2259 EXPORT_SYMBOL(sk_stop_timer);
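
/*
 * These helpers pair a timer with a socket reference: sk_reset_timer()
 * takes a hold when it arms a previously idle timer, and either the
 * timer handler or sk_stop_timer() releases it.  A rough sketch with
 * hypothetical names (timer setup via setup_timer() not shown):
 *
 *	static void my_timer_handler(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *
 *		bh_lock_sock(sk);
 *		// ... do work; possibly re-arm with sk_reset_timer(),
 *		//     which takes a fresh reference if needed ...
 *		bh_unlock_sock(sk);
 *		sock_put(sk);		// drop the reference for this run
 *	}
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + my_timeout);
 */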
2260 
2261 void sock_init_data(struct socket *sock, struct sock *sk)
2262 {
2263 	skb_queue_head_init(&sk->sk_receive_queue);
2264 	skb_queue_head_init(&sk->sk_write_queue);
2265 	skb_queue_head_init(&sk->sk_error_queue);
2266 #ifdef CONFIG_NET_DMA
2267 	skb_queue_head_init(&sk->sk_async_wait_queue);
2268 #endif
2269 
2270 	sk->sk_send_head	=	NULL;
2271 
2272 	init_timer(&sk->sk_timer);
2273 
2274 	sk->sk_allocation	=	GFP_KERNEL;
2275 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2276 	sk->sk_sndbuf		=	sysctl_wmem_default;
2277 	sk->sk_state		=	TCP_CLOSE;
2278 	sk_set_socket(sk, sock);
2279 
2280 	sock_set_flag(sk, SOCK_ZAPPED);
2281 
2282 	if (sock) {
2283 		sk->sk_type	=	sock->type;
2284 		sk->sk_wq	=	sock->wq;
2285 		sock->sk	=	sk;
2286 	} else
2287 		sk->sk_wq	=	NULL;
2288 
2289 	spin_lock_init(&sk->sk_dst_lock);
2290 	rwlock_init(&sk->sk_callback_lock);
2291 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2292 			af_callback_keys + sk->sk_family,
2293 			af_family_clock_key_strings[sk->sk_family]);
2294 
2295 	sk->sk_state_change	=	sock_def_wakeup;
2296 	sk->sk_data_ready	=	sock_def_readable;
2297 	sk->sk_write_space	=	sock_def_write_space;
2298 	sk->sk_error_report	=	sock_def_error_report;
2299 	sk->sk_destruct		=	sock_def_destruct;
2300 
2301 	sk->sk_frag.page	=	NULL;
2302 	sk->sk_frag.offset	=	0;
2303 	sk->sk_peek_off		=	-1;
2304 
2305 	sk->sk_peer_pid 	=	NULL;
2306 	sk->sk_peer_cred	=	NULL;
2307 	sk->sk_write_pending	=	0;
2308 	sk->sk_rcvlowat		=	1;
2309 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2310 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2311 
2312 	sk->sk_stamp = ktime_set(-1L, 0);
2313 
2314 #ifdef CONFIG_NET_RX_BUSY_POLL
2315 	sk->sk_napi_id		=	0;
2316 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2317 #endif
2318 
2319 	sk->sk_max_pacing_rate = ~0U;
2320 	sk->sk_pacing_rate = ~0U;
2321 	/*
2322 	 * Before updating sk_refcnt, we must commit prior changes to memory
2323 	 * (see Documentation/RCU/rculist_nulls.txt for details)
2324 	 */
2325 	smp_wmb();
2326 	atomic_set(&sk->sk_refcnt, 1);
2327 	atomic_set(&sk->sk_drops, 0);
2328 }
2329 EXPORT_SYMBOL(sock_init_data);
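
/*
 * A protocol's socket creation path typically calls sock_init_data()
 * and then overrides whichever defaults it needs.  A hedged sketch with
 * hypothetical names (PF_MYPROTO, my_proto, my_proto_ops, my_data_ready,
 * my_destruct):
 *
 *	static int my_create(struct net *net, struct socket *sock,
 *			     int protocol, int kern)
 *	{
 *		struct sock *sk = sk_alloc(net, PF_MYPROTO, GFP_KERNEL,
 *					   &my_proto);
 *		if (!sk)
 *			return -ENOMEM;
 *
 *		sock->ops = &my_proto_ops;
 *		sock_init_data(sock, sk);
 *		sk->sk_data_ready = my_data_ready;	// custom wakeup
 *		sk->sk_destruct   = my_destruct;	// free private state
 *		return 0;
 *	}
 */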
2330 
2331 void lock_sock_nested(struct sock *sk, int subclass)
2332 {
2333 	might_sleep();
2334 	spin_lock_bh(&sk->sk_lock.slock);
2335 	if (sk->sk_lock.owned)
2336 		__lock_sock(sk);
2337 	sk->sk_lock.owned = 1;
2338 	spin_unlock(&sk->sk_lock.slock);
2339 	/*
2340 	 * The sk_lock has mutex_lock() semantics here:
2341 	 */
2342 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2343 	local_bh_enable();
2344 }
2345 EXPORT_SYMBOL(lock_sock_nested);
2346 
2347 void release_sock(struct sock *sk)
2348 {
2349 	/*
2350 	 * The sk_lock has mutex_unlock() semantics:
2351 	 */
2352 	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2353 
2354 	spin_lock_bh(&sk->sk_lock.slock);
2355 	if (sk->sk_backlog.tail)
2356 		__release_sock(sk);
2357 
2358 	if (sk->sk_prot->release_cb)
2359 		sk->sk_prot->release_cb(sk);
2360 
2361 	sk->sk_lock.owned = 0;
2362 	if (waitqueue_active(&sk->sk_lock.wq))
2363 		wake_up(&sk->sk_lock.wq);
2364 	spin_unlock_bh(&sk->sk_lock.slock);
2365 }
2366 EXPORT_SYMBOL(release_sock);
2367 
2368 /**
2369  * lock_sock_fast - fast version of lock_sock
2370  * @sk: socket
2371  *
2372  * This version should be used for very small sections where the process won't block.
2373  * Returns false if the fast path is taken:
2374  *   sk_lock.slock locked, owned = 0, BH disabled
2375  * Returns true if the slow path is taken:
2376  *   sk_lock.slock unlocked, owned = 1, BH enabled
2377  */
2378 bool lock_sock_fast(struct sock *sk)
2379 {
2380 	might_sleep();
2381 	spin_lock_bh(&sk->sk_lock.slock);
2382 
2383 	if (!sk->sk_lock.owned)
2384 		/* Note: the fast path returns with the spinlock held
2385 		 * and BH still disabled (see the function comment above).
2386 		 */
2387 		return false;
2388 
2389 	__lock_sock(sk);
2390 	sk->sk_lock.owned = 1;
2391 	spin_unlock(&sk->sk_lock.slock);
2392 	/*
2393 	 * The sk_lock has mutex_lock() semantics here:
2394 	 */
2395 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2396 	local_bh_enable();
2397 	return true;
2398 }
2399 EXPORT_SYMBOL(lock_sock_fast);
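
/*
 * A minimal usage sketch: callers remember the return value and hand it
 * back to unlock_sock_fast() so the matching unlock path is taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	// ... short, non-blocking critical section ...
 *
 *	unlock_sock_fast(sk, slow);
 */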
2400 
2401 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2402 {
2403 	struct timeval tv;
2404 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2405 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2406 	tv = ktime_to_timeval(sk->sk_stamp);
2407 	if (tv.tv_sec == -1)
2408 		return -ENOENT;
2409 	if (tv.tv_sec == 0) {
2410 		sk->sk_stamp = ktime_get_real();
2411 		tv = ktime_to_timeval(sk->sk_stamp);
2412 	}
2413 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2414 }
2415 EXPORT_SYMBOL(sock_get_timestamp);
2416 
2417 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2418 {
2419 	struct timespec ts;
2420 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2421 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2422 	ts = ktime_to_timespec(sk->sk_stamp);
2423 	if (ts.tv_sec == -1)
2424 		return -ENOENT;
2425 	if (ts.tv_sec == 0) {
2426 		sk->sk_stamp = ktime_get_real();
2427 		ts = ktime_to_timespec(sk->sk_stamp);
2428 	}
2429 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2430 }
2431 EXPORT_SYMBOL(sock_get_timestampns);
2432 
2433 void sock_enable_timestamp(struct sock *sk, int flag)
2434 {
2435 	if (!sock_flag(sk, flag)) {
2436 		unsigned long previous_flags = sk->sk_flags;
2437 
2438 		sock_set_flag(sk, flag);
2439 		/*
2440 		 * We just set one of the two flags which require net
2441 		 * time stamping, but time stamping might have been on
2442 		 * already because of the other one.
2443 		 */
2444 		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2445 			net_enable_timestamp();
2446 	}
2447 }
2448 
2449 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2450 		       int level, int type)
2451 {
2452 	struct sock_exterr_skb *serr;
2453 	struct sk_buff *skb, *skb2;
2454 	int copied, err;
2455 
2456 	err = -EAGAIN;
2457 	skb = skb_dequeue(&sk->sk_error_queue);
2458 	if (skb == NULL)
2459 		goto out;
2460 
2461 	copied = skb->len;
2462 	if (copied > len) {
2463 		msg->msg_flags |= MSG_TRUNC;
2464 		copied = len;
2465 	}
2466 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2467 	if (err)
2468 		goto out_free_skb;
2469 
2470 	sock_recv_timestamp(msg, sk, skb);
2471 
2472 	serr = SKB_EXT_ERR(skb);
2473 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2474 
2475 	msg->msg_flags |= MSG_ERRQUEUE;
2476 	err = copied;
2477 
2478 	/* Reset and regenerate socket error */
2479 	spin_lock_bh(&sk->sk_error_queue.lock);
2480 	sk->sk_err = 0;
2481 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2482 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2483 		spin_unlock_bh(&sk->sk_error_queue.lock);
2484 		sk->sk_error_report(sk);
2485 	} else
2486 		spin_unlock_bh(&sk->sk_error_queue.lock);
2487 
2488 out_free_skb:
2489 	kfree_skb(skb);
2490 out:
2491 	return err;
2492 }
2493 EXPORT_SYMBOL(sock_recv_errqueue);
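
/*
 * A protocol's recvmsg() can hand MSG_ERRQUEUE requests to the helper
 * above, passing the cmsg level/type it wants the extended error
 * delivered under.  Sketch only; SOL_MYPROTO and MYPROTO_RECVERR are
 * hypothetical placeholders.
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_MYPROTO, MYPROTO_RECVERR);
 */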
2494 
2495 /*
2496  *	Get a socket option on a socket.
2497  *
2498  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2499  *	asynchronous errors should be reported by getsockopt. We assume
2500  *	this means if you specify SO_ERROR (otherwise what's the point of it?).
2501  */
2502 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2503 			   char __user *optval, int __user *optlen)
2504 {
2505 	struct sock *sk = sock->sk;
2506 
2507 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2508 }
2509 EXPORT_SYMBOL(sock_common_getsockopt);
2510 
2511 #ifdef CONFIG_COMPAT
2512 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2513 				  char __user *optval, int __user *optlen)
2514 {
2515 	struct sock *sk = sock->sk;
2516 
2517 	if (sk->sk_prot->compat_getsockopt != NULL)
2518 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2519 						      optval, optlen);
2520 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2521 }
2522 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2523 #endif
2524 
2525 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2526 			struct msghdr *msg, size_t size, int flags)
2527 {
2528 	struct sock *sk = sock->sk;
2529 	int addr_len = 0;
2530 	int err;
2531 
2532 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2533 				   flags & ~MSG_DONTWAIT, &addr_len);
2534 	if (err >= 0)
2535 		msg->msg_namelen = addr_len;
2536 	return err;
2537 }
2538 EXPORT_SYMBOL(sock_common_recvmsg);
2539 
2540 /*
2541  *	Set socket options on an inet socket.
2542  */
2543 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2544 			   char __user *optval, unsigned int optlen)
2545 {
2546 	struct sock *sk = sock->sk;
2547 
2548 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2549 }
2550 EXPORT_SYMBOL(sock_common_setsockopt);
2551 
2552 #ifdef CONFIG_COMPAT
2553 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2554 				  char __user *optval, unsigned int optlen)
2555 {
2556 	struct sock *sk = sock->sk;
2557 
2558 	if (sk->sk_prot->compat_setsockopt != NULL)
2559 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2560 						      optval, optlen);
2561 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2562 }
2563 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2564 #endif
2565 
2566 void sk_common_release(struct sock *sk)
2567 {
2568 	if (sk->sk_prot->destroy)
2569 		sk->sk_prot->destroy(sk);
2570 
2571 	/*
2572 	 * Observation: when sk_common_release() is called, processes have
2573 	 * no access to the socket, but the network stack still does.
2574 	 * Step one, detach it from networking:
2575 	 *
2576 	 * A. Remove from hash tables.
2577 	 */
2578 
2579 	sk->sk_prot->unhash(sk);
2580 
2581 	/*
2582 	 * At this point the socket cannot receive new packets, but it is possible
2583 	 * that some packets are in flight because some CPU is running the receiver
2584 	 * and did the hash table lookup before we unhashed the socket. They will
2585 	 * reach the receive queue and will be purged by the socket destructor.
2586 	 *
2587 	 * Also, we still have packets pending on the receive queue and probably
2588 	 * our own packets waiting in device queues. sock_destroy will drain the
2589 	 * receive queue, but transmitted packets will delay socket destruction
2590 	 * until the last reference is released.
2591 	 */
2592 
2593 	sock_orphan(sk);
2594 
2595 	xfrm_sk_free_policy(sk);
2596 
2597 	sk_refcnt_debug_release(sk);
2598 
2599 	if (sk->sk_frag.page) {
2600 		put_page(sk->sk_frag.page);
2601 		sk->sk_frag.page = NULL;
2602 	}
2603 
2604 	sock_put(sk);
2605 }
2606 EXPORT_SYMBOL(sk_common_release);
2607 
2608 #ifdef CONFIG_PROC_FS
2609 #define PROTO_INUSE_NR	64	/* should be enough for now */
2610 struct prot_inuse {
2611 	int val[PROTO_INUSE_NR];
2612 };
2613 
2614 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2615 
2616 #ifdef CONFIG_NET_NS
2617 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2618 {
2619 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2620 }
2621 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2622 
2623 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2624 {
2625 	int cpu, idx = prot->inuse_idx;
2626 	int res = 0;
2627 
2628 	for_each_possible_cpu(cpu)
2629 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2630 
2631 	return res >= 0 ? res : 0;
2632 }
2633 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2634 
2635 static int __net_init sock_inuse_init_net(struct net *net)
2636 {
2637 	net->core.inuse = alloc_percpu(struct prot_inuse);
2638 	return net->core.inuse ? 0 : -ENOMEM;
2639 }
2640 
2641 static void __net_exit sock_inuse_exit_net(struct net *net)
2642 {
2643 	free_percpu(net->core.inuse);
2644 }
2645 
2646 static struct pernet_operations net_inuse_ops = {
2647 	.init = sock_inuse_init_net,
2648 	.exit = sock_inuse_exit_net,
2649 };
2650 
2651 static __init int net_inuse_init(void)
2652 {
2653 	if (register_pernet_subsys(&net_inuse_ops))
2654 		panic("Cannot initialize net inuse counters");
2655 
2656 	return 0;
2657 }
2658 
2659 core_initcall(net_inuse_init);
2660 #else
2661 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2662 
2663 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2664 {
2665 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2666 }
2667 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2668 
2669 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2670 {
2671 	int cpu, idx = prot->inuse_idx;
2672 	int res = 0;
2673 
2674 	for_each_possible_cpu(cpu)
2675 		res += per_cpu(prot_inuse, cpu).val[idx];
2676 
2677 	return res >= 0 ? res : 0;
2678 }
2679 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2680 #endif
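
/*
 * Protocols update these counters from their ->hash()/->unhash()
 * callbacks; a rough sketch with hypothetical helpers (lookup-table
 * locking elided):
 *
 *	static void my_hash(struct sock *sk)
 *	{
 *		// ... insert sk into the protocol's lookup table ...
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 *	}
 *
 *	static void my_unhash(struct sock *sk)
 *	{
 *		// ... remove sk from the lookup table ...
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 *	}
 */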
2681 
2682 static void assign_proto_idx(struct proto *prot)
2683 {
2684 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2685 
2686 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2687 		pr_err("PROTO_INUSE_NR exhausted\n");
2688 		return;
2689 	}
2690 
2691 	set_bit(prot->inuse_idx, proto_inuse_idx);
2692 }
2693 
2694 static void release_proto_idx(struct proto *prot)
2695 {
2696 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2697 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2698 }
2699 #else
2700 static inline void assign_proto_idx(struct proto *prot)
2701 {
2702 }
2703 
2704 static inline void release_proto_idx(struct proto *prot)
2705 {
2706 }
2707 #endif
2708 
2709 int proto_register(struct proto *prot, int alloc_slab)
2710 {
2711 	if (alloc_slab) {
2712 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2713 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2714 					NULL);
2715 
2716 		if (prot->slab == NULL) {
2717 			pr_crit("%s: Can't create sock SLAB cache!\n",
2718 				prot->name);
2719 			goto out;
2720 		}
2721 
2722 		if (prot->rsk_prot != NULL) {
2723 			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2724 			if (prot->rsk_prot->slab_name == NULL)
2725 				goto out_free_sock_slab;
2726 
2727 			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2728 								 prot->rsk_prot->obj_size, 0,
2729 								 SLAB_HWCACHE_ALIGN, NULL);
2730 
2731 			if (prot->rsk_prot->slab == NULL) {
2732 				pr_crit("%s: Can't create request sock SLAB cache!\n",
2733 					prot->name);
2734 				goto out_free_request_sock_slab_name;
2735 			}
2736 		}
2737 
2738 		if (prot->twsk_prot != NULL) {
2739 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2740 
2741 			if (prot->twsk_prot->twsk_slab_name == NULL)
2742 				goto out_free_request_sock_slab;
2743 
2744 			prot->twsk_prot->twsk_slab =
2745 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2746 						  prot->twsk_prot->twsk_obj_size,
2747 						  0,
2748 						  SLAB_HWCACHE_ALIGN |
2749 							prot->slab_flags,
2750 						  NULL);
2751 			if (prot->twsk_prot->twsk_slab == NULL)
2752 				goto out_free_timewait_sock_slab_name;
2753 		}
2754 	}
2755 
2756 	mutex_lock(&proto_list_mutex);
2757 	list_add(&prot->node, &proto_list);
2758 	assign_proto_idx(prot);
2759 	mutex_unlock(&proto_list_mutex);
2760 	return 0;
2761 
2762 out_free_timewait_sock_slab_name:
2763 	kfree(prot->twsk_prot->twsk_slab_name);
2764 out_free_request_sock_slab:
2765 	if (prot->rsk_prot && prot->rsk_prot->slab) {
2766 		kmem_cache_destroy(prot->rsk_prot->slab);
2767 		prot->rsk_prot->slab = NULL;
2768 	}
2769 out_free_request_sock_slab_name:
2770 	if (prot->rsk_prot)
2771 		kfree(prot->rsk_prot->slab_name);
2772 out_free_sock_slab:
2773 	kmem_cache_destroy(prot->slab);
2774 	prot->slab = NULL;
2775 out:
2776 	return -ENOBUFS;
2777 }
2778 EXPORT_SYMBOL(proto_register);
2779 
2780 void proto_unregister(struct proto *prot)
2781 {
2782 	mutex_lock(&proto_list_mutex);
2783 	release_proto_idx(prot);
2784 	list_del(&prot->node);
2785 	mutex_unlock(&proto_list_mutex);
2786 
2787 	if (prot->slab != NULL) {
2788 		kmem_cache_destroy(prot->slab);
2789 		prot->slab = NULL;
2790 	}
2791 
2792 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2793 		kmem_cache_destroy(prot->rsk_prot->slab);
2794 		kfree(prot->rsk_prot->slab_name);
2795 		prot->rsk_prot->slab = NULL;
2796 	}
2797 
2798 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2799 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2800 		kfree(prot->twsk_prot->twsk_slab_name);
2801 		prot->twsk_prot->twsk_slab = NULL;
2802 	}
2803 }
2804 EXPORT_SYMBOL(proto_unregister);
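
/*
 * A typical modular protocol registers its struct proto at module init
 * and unregisters it on exit.  Hedged sketch with hypothetical names
 * (struct my_sock, my_init, my_exit):
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return proto_register(&my_proto, 1);	// 1 => create a slab
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		proto_unregister(&my_proto);
 *	}
 */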
2805 
2806 #ifdef CONFIG_PROC_FS
2807 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2808 	__acquires(proto_list_mutex)
2809 {
2810 	mutex_lock(&proto_list_mutex);
2811 	return seq_list_start_head(&proto_list, *pos);
2812 }
2813 
2814 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2815 {
2816 	return seq_list_next(v, &proto_list, pos);
2817 }
2818 
2819 static void proto_seq_stop(struct seq_file *seq, void *v)
2820 	__releases(proto_list_mutex)
2821 {
2822 	mutex_unlock(&proto_list_mutex);
2823 }
2824 
2825 static char proto_method_implemented(const void *method)
2826 {
2827 	return method == NULL ? 'n' : 'y';
2828 }
2829 static long sock_prot_memory_allocated(struct proto *proto)
2830 {
2831 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2832 }
2833 
2834 static char *sock_prot_memory_pressure(struct proto *proto)
2835 {
2836 	return proto->memory_pressure != NULL ?
2837 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2838 }
2839 
2840 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2841 {
2842 
2843 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2844 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2845 		   proto->name,
2846 		   proto->obj_size,
2847 		   sock_prot_inuse_get(seq_file_net(seq), proto),
2848 		   sock_prot_memory_allocated(proto),
2849 		   sock_prot_memory_pressure(proto),
2850 		   proto->max_header,
2851 		   proto->slab == NULL ? "no" : "yes",
2852 		   module_name(proto->owner),
2853 		   proto_method_implemented(proto->close),
2854 		   proto_method_implemented(proto->connect),
2855 		   proto_method_implemented(proto->disconnect),
2856 		   proto_method_implemented(proto->accept),
2857 		   proto_method_implemented(proto->ioctl),
2858 		   proto_method_implemented(proto->init),
2859 		   proto_method_implemented(proto->destroy),
2860 		   proto_method_implemented(proto->shutdown),
2861 		   proto_method_implemented(proto->setsockopt),
2862 		   proto_method_implemented(proto->getsockopt),
2863 		   proto_method_implemented(proto->sendmsg),
2864 		   proto_method_implemented(proto->recvmsg),
2865 		   proto_method_implemented(proto->sendpage),
2866 		   proto_method_implemented(proto->bind),
2867 		   proto_method_implemented(proto->backlog_rcv),
2868 		   proto_method_implemented(proto->hash),
2869 		   proto_method_implemented(proto->unhash),
2870 		   proto_method_implemented(proto->get_port),
2871 		   proto_method_implemented(proto->enter_memory_pressure));
2872 }
2873 
2874 static int proto_seq_show(struct seq_file *seq, void *v)
2875 {
2876 	if (v == &proto_list)
2877 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2878 			   "protocol",
2879 			   "size",
2880 			   "sockets",
2881 			   "memory",
2882 			   "press",
2883 			   "maxhdr",
2884 			   "slab",
2885 			   "module",
2886 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2887 	else
2888 		proto_seq_printf(seq, list_entry(v, struct proto, node));
2889 	return 0;
2890 }
2891 
2892 static const struct seq_operations proto_seq_ops = {
2893 	.start  = proto_seq_start,
2894 	.next   = proto_seq_next,
2895 	.stop   = proto_seq_stop,
2896 	.show   = proto_seq_show,
2897 };
2898 
2899 static int proto_seq_open(struct inode *inode, struct file *file)
2900 {
2901 	return seq_open_net(inode, file, &proto_seq_ops,
2902 			    sizeof(struct seq_net_private));
2903 }
2904 
2905 static const struct file_operations proto_seq_fops = {
2906 	.owner		= THIS_MODULE,
2907 	.open		= proto_seq_open,
2908 	.read		= seq_read,
2909 	.llseek		= seq_lseek,
2910 	.release	= seq_release_net,
2911 };
2912 
2913 static __net_init int proto_init_net(struct net *net)
2914 {
2915 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2916 		return -ENOMEM;
2917 
2918 	return 0;
2919 }
2920 
2921 static __net_exit void proto_exit_net(struct net *net)
2922 {
2923 	remove_proc_entry("protocols", net->proc_net);
2924 }
2925 
2926 
2927 static __net_initdata struct pernet_operations proto_net_ops = {
2928 	.init = proto_init_net,
2929 	.exit = proto_exit_net,
2930 };
2931 
2932 static int __init proto_init(void)
2933 {
2934 	return register_pernet_subsys(&proto_net_ops);
2935 }
2936 
2937 subsys_initcall(proto_init);
2938 
2939 #endif /* PROC_FS */
2940