xref: /openbmc/linux/net/core/sock.c (revision f2f872f9272a79a1048877ea14c15576f46c225e)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93 
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
120 
121 #include <asm/uaccess.h>
122 
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 
135 #include <linux/filter.h>
136 
137 #include <trace/events/sock.h>
138 
139 #ifdef CONFIG_INET
140 #include <net/tcp.h>
141 #endif
142 
143 #include <net/busy_poll.h>
144 
145 static DEFINE_MUTEX(proto_list_mutex);
146 static LIST_HEAD(proto_list);
147 
148 #ifdef CONFIG_MEMCG_KMEM
149 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
150 {
151 	struct proto *proto;
152 	int ret = 0;
153 
154 	mutex_lock(&proto_list_mutex);
155 	list_for_each_entry(proto, &proto_list, node) {
156 		if (proto->init_cgroup) {
157 			ret = proto->init_cgroup(memcg, ss);
158 			if (ret)
159 				goto out;
160 		}
161 	}
162 
163 	mutex_unlock(&proto_list_mutex);
164 	return ret;
165 out:
166 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
167 		if (proto->destroy_cgroup)
168 			proto->destroy_cgroup(memcg);
169 	mutex_unlock(&proto_list_mutex);
170 	return ret;
171 }
172 
173 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
174 {
175 	struct proto *proto;
176 
177 	mutex_lock(&proto_list_mutex);
178 	list_for_each_entry_reverse(proto, &proto_list, node)
179 		if (proto->destroy_cgroup)
180 			proto->destroy_cgroup(memcg);
181 	mutex_unlock(&proto_list_mutex);
182 }
183 #endif
184 
185 /*
186  * Each address family might have different locking rules, so we have
187  * one slock key per address family:
188  */
189 static struct lock_class_key af_family_keys[AF_MAX];
190 static struct lock_class_key af_family_slock_keys[AF_MAX];
191 
192 #if defined(CONFIG_MEMCG_KMEM)
193 struct static_key memcg_socket_limit_enabled;
194 EXPORT_SYMBOL(memcg_socket_limit_enabled);
195 #endif
196 
197 /*
198  * Make lock validator output more readable. (We pre-construct these
199  * strings at build time, so that runtime initialization of socket
200  * locks is fast):
201  */
202 static const char *const af_family_key_strings[AF_MAX+1] = {
203   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
204   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
205   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
206   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
207   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
208   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
209   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
210   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
211   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
212   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
213   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
214   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
215   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
216   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
217 };
218 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
219   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
220   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
221   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
222   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
223   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
224   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
225   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
226   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
227   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
228   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
229   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
230   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
231   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
232   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
233 };
234 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
235   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
236   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
237   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
238   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
239   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
240   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
241   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
242   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
243   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
244   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
245   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
246   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
247   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
248   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
249 };
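
/*
 * Note: the three string tables above exist purely for lockdep's benefit.
 * sock_lock_init() below feeds the sk_lock-* and slock-* names to the
 * lock validator, and the clock-* names are used for sk_callback_lock
 * (see sk_clone_lock() and the sock init code elsewhere in this file).
 * They have no effect when lockdep is not configured.
 */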
250 
251 /*
252  * sk_callback_lock locking rules are per-address-family,
253  * so split the lock classes by using a per-AF key:
254  */
255 static struct lock_class_key af_callback_keys[AF_MAX];
256 
257 /* Take the size of the struct sk_buff overhead into account when
258  * determining these values, since that overhead is not constant across
259  * platforms.  This keeps socket queueing behavior and performance
260  * independent of such differences.
261  */
262 #define _SK_MEM_PACKETS		256
263 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
264 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
265 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
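
/*
 * Back-of-the-envelope illustration only (the exact figure depends on the
 * per-arch sizes folded into SKB_TRUESIZE()): roughly 256 bytes of payload
 * plus a few hundred bytes of sk_buff/skb_shared_info overhead per packet,
 * times 256 packets, puts the default limits below at around 200 KB.
 */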
266 
267 /* Run time adjustable parameters. */
268 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
269 EXPORT_SYMBOL(sysctl_wmem_max);
270 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
271 EXPORT_SYMBOL(sysctl_rmem_max);
272 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
273 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
274 
275 /* Maximum space consumed by an iovec or ancillary data, plus some slack */
276 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
277 EXPORT_SYMBOL(sysctl_optmem_max);
278 
279 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
280 EXPORT_SYMBOL_GPL(memalloc_socks);
281 
282 /**
283  * sk_set_memalloc - sets %SOCK_MEMALLOC
284  * @sk: socket to set it on
285  *
286  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
287  * It's the responsibility of the admin to adjust min_free_kbytes
288  * to meet the requirements.
289  */
290 void sk_set_memalloc(struct sock *sk)
291 {
292 	sock_set_flag(sk, SOCK_MEMALLOC);
293 	sk->sk_allocation |= __GFP_MEMALLOC;
294 	static_key_slow_inc(&memalloc_socks);
295 }
296 EXPORT_SYMBOL_GPL(sk_set_memalloc);
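
/*
 * Illustrative (hypothetical) use only: a swap-over-network transport,
 * e.g. an NBD or NFS swap backend, would typically do
 *
 *	sk_set_memalloc(sk);	(let this socket dip into reserves)
 *	... perform writeout over the socket ...
 *	sk_clear_memalloc(sk);	(when the transport is torn down)
 *
 * so that the I/O needed to free memory is not itself blocked on memory.
 */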
297 
298 void sk_clear_memalloc(struct sock *sk)
299 {
300 	sock_reset_flag(sk, SOCK_MEMALLOC);
301 	sk->sk_allocation &= ~__GFP_MEMALLOC;
302 	static_key_slow_dec(&memalloc_socks);
303 
304 	/*
305 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
306 	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
307 	 * it has rmem allocations there is a risk that the user of the
308 	 * socket cannot make forward progress due to exceeding the rmem
309 	 * limits. By rights, sk_clear_memalloc() should only be called
310 	 * on sockets being torn down but warn and reset the accounting if
311 	 * that assumption breaks.
312 	 */
313 	if (WARN_ON(sk->sk_forward_alloc))
314 		sk_mem_reclaim(sk);
315 }
316 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
317 
318 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
319 {
320 	int ret;
321 	unsigned long pflags = current->flags;
322 
323 	/* these should have been dropped before queueing */
324 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
325 
326 	current->flags |= PF_MEMALLOC;
327 	ret = sk->sk_backlog_rcv(sk, skb);
328 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
329 
330 	return ret;
331 }
332 EXPORT_SYMBOL(__sk_backlog_rcv);
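
/*
 * For illustration: the PF_MEMALLOC dance above is what the page allocator
 * keys on, so any allocation the protocol's backlog handler performs while
 * the flag is set may dip into the same reserves that SOCK_MEMALLOC
 * sockets are entitled to.
 */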
333 
334 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
335 {
336 	struct timeval tv;
337 
338 	if (optlen < sizeof(tv))
339 		return -EINVAL;
340 	if (copy_from_user(&tv, optval, sizeof(tv)))
341 		return -EFAULT;
342 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
343 		return -EDOM;
344 
345 	if (tv.tv_sec < 0) {
346 		static int warned __read_mostly;
347 
348 		*timeo_p = 0;
349 		if (warned < 10 && net_ratelimit()) {
350 			warned++;
351 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
352 				__func__, current->comm, task_pid_nr(current));
353 		}
354 		return 0;
355 	}
356 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
357 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
358 		return 0;
359 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
360 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
361 	return 0;
362 }
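
/*
 * Worked example of the conversion above, assuming HZ == 1000: a timeval
 * of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * HZ + 500000 / (1000000 / HZ) = 2000 + 500 = 2500 jiffies (2.5 s);
 * sub-jiffy remainders are rounded up to the next jiffy.
 */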
363 
364 static void sock_warn_obsolete_bsdism(const char *name)
365 {
366 	static int warned;
367 	static char warncomm[TASK_COMM_LEN];
368 	if (strcmp(warncomm, current->comm) && warned < 5) {
369 		strcpy(warncomm,  current->comm);
370 		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
371 			warncomm, name);
372 		warned++;
373 	}
374 }
375 
376 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
377 
378 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
379 {
380 	if (sk->sk_flags & flags) {
381 		sk->sk_flags &= ~flags;
382 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
383 			net_disable_timestamp();
384 	}
385 }
386 
387 
388 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
389 {
390 	int err;
391 	int skb_len;
392 	unsigned long flags;
393 	struct sk_buff_head *list = &sk->sk_receive_queue;
394 
395 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
396 		atomic_inc(&sk->sk_drops);
397 		trace_sock_rcvqueue_full(sk, skb);
398 		return -ENOMEM;
399 	}
400 
401 	err = sk_filter(sk, skb);
402 	if (err)
403 		return err;
404 
405 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
406 		atomic_inc(&sk->sk_drops);
407 		return -ENOBUFS;
408 	}
409 
410 	skb->dev = NULL;
411 	skb_set_owner_r(skb, sk);
412 
413 	/* Cache the SKB length before we tack it onto the receive
414 	 * queue.  Once it is added it no longer belongs to us and
415 	 * may be freed by other threads of control pulling packets
416 	 * from the queue.
417 	 */
418 	skb_len = skb->len;
419 
420 	/* We escape from the RCU-protected region here, so make sure we
421 	 * don't leak a non-refcounted dst.
422 	 */
423 	skb_dst_force(skb);
424 
425 	spin_lock_irqsave(&list->lock, flags);
426 	skb->dropcount = atomic_read(&sk->sk_drops);
427 	__skb_queue_tail(list, skb);
428 	spin_unlock_irqrestore(&list->lock, flags);
429 
430 	if (!sock_flag(sk, SOCK_DEAD))
431 		sk->sk_data_ready(sk, skb_len);
432 	return 0;
433 }
434 EXPORT_SYMBOL(sock_queue_rcv_skb);
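
/*
 * Sketch of a typical (hypothetical) protocol receive path built on the
 * helper above:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);		(caller still owns the skb on error)
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 *
 * On success, ownership passes to the receive queue and sk_data_ready()
 * has already woken any sleeping reader.
 */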
435 
436 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
437 {
438 	int rc = NET_RX_SUCCESS;
439 
440 	if (sk_filter(sk, skb))
441 		goto discard_and_relse;
442 
443 	skb->dev = NULL;
444 
445 	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
446 		atomic_inc(&sk->sk_drops);
447 		goto discard_and_relse;
448 	}
449 	if (nested)
450 		bh_lock_sock_nested(sk);
451 	else
452 		bh_lock_sock(sk);
453 	if (!sock_owned_by_user(sk)) {
454 		/*
455 		 * trylock + unlock semantics:
456 		 */
457 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
458 
459 		rc = sk_backlog_rcv(sk, skb);
460 
461 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
462 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
463 		bh_unlock_sock(sk);
464 		atomic_inc(&sk->sk_drops);
465 		goto discard_and_relse;
466 	}
467 
468 	bh_unlock_sock(sk);
469 out:
470 	sock_put(sk);
471 	return rc;
472 discard_and_relse:
473 	kfree_skb(skb);
474 	goto out;
475 }
476 EXPORT_SYMBOL(sk_receive_skb);
477 
478 void sk_reset_txq(struct sock *sk)
479 {
480 	sk_tx_queue_clear(sk);
481 }
482 EXPORT_SYMBOL(sk_reset_txq);
483 
484 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
485 {
486 	struct dst_entry *dst = __sk_dst_get(sk);
487 
488 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
489 		sk_tx_queue_clear(sk);
490 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
491 		dst_release(dst);
492 		return NULL;
493 	}
494 
495 	return dst;
496 }
497 EXPORT_SYMBOL(__sk_dst_check);
498 
499 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
500 {
501 	struct dst_entry *dst = sk_dst_get(sk);
502 
503 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
504 		sk_dst_reset(sk);
505 		dst_release(dst);
506 		return NULL;
507 	}
508 
509 	return dst;
510 }
511 EXPORT_SYMBOL(sk_dst_check);
512 
513 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
514 				int optlen)
515 {
516 	int ret = -ENOPROTOOPT;
517 #ifdef CONFIG_NETDEVICES
518 	struct net *net = sock_net(sk);
519 	char devname[IFNAMSIZ];
520 	int index;
521 
522 	/* Sorry... */
523 	ret = -EPERM;
524 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
525 		goto out;
526 
527 	ret = -EINVAL;
528 	if (optlen < 0)
529 		goto out;
530 
531 	/* Bind this socket to a particular device like "eth0",
532 	 * as specified in the passed interface name. If the
533 	 * name is "" or the option length is zero the socket
534 	 * is not bound.
535 	 */
536 	if (optlen > IFNAMSIZ - 1)
537 		optlen = IFNAMSIZ - 1;
538 	memset(devname, 0, sizeof(devname));
539 
540 	ret = -EFAULT;
541 	if (copy_from_user(devname, optval, optlen))
542 		goto out;
543 
544 	index = 0;
545 	if (devname[0] != '\0') {
546 		struct net_device *dev;
547 
548 		rcu_read_lock();
549 		dev = dev_get_by_name_rcu(net, devname);
550 		if (dev)
551 			index = dev->ifindex;
552 		rcu_read_unlock();
553 		ret = -ENODEV;
554 		if (!dev)
555 			goto out;
556 	}
557 
558 	lock_sock(sk);
559 	sk->sk_bound_dev_if = index;
560 	sk_dst_reset(sk);
561 	release_sock(sk);
562 
563 	ret = 0;
564 
565 out:
566 #endif
567 
568 	return ret;
569 }
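
/*
 * Userspace sketch, for illustration only; binding to "eth0" requires
 * CAP_NET_RAW in the socket's network namespace:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 * Passing an empty name (or a zero option length) removes the binding.
 */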
570 
571 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
572 				int __user *optlen, int len)
573 {
574 	int ret = -ENOPROTOOPT;
575 #ifdef CONFIG_NETDEVICES
576 	struct net *net = sock_net(sk);
577 	char devname[IFNAMSIZ];
578 
579 	if (sk->sk_bound_dev_if == 0) {
580 		len = 0;
581 		goto zero;
582 	}
583 
584 	ret = -EINVAL;
585 	if (len < IFNAMSIZ)
586 		goto out;
587 
588 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
589 	if (ret)
590 		goto out;
591 
592 	len = strlen(devname) + 1;
593 
594 	ret = -EFAULT;
595 	if (copy_to_user(optval, devname, len))
596 		goto out;
597 
598 zero:
599 	ret = -EFAULT;
600 	if (put_user(len, optlen))
601 		goto out;
602 
603 	ret = 0;
604 
605 out:
606 #endif
607 
608 	return ret;
609 }
610 
611 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
612 {
613 	if (valbool)
614 		sock_set_flag(sk, bit);
615 	else
616 		sock_reset_flag(sk, bit);
617 }
618 
619 /*
620  *	This is meant for all protocols to use and covers goings on
621  *	at the socket level. Everything here is generic.
622  */
623 
624 int sock_setsockopt(struct socket *sock, int level, int optname,
625 		    char __user *optval, unsigned int optlen)
626 {
627 	struct sock *sk = sock->sk;
628 	int val;
629 	int valbool;
630 	struct linger ling;
631 	int ret = 0;
632 
633 	/*
634 	 *	Options without arguments
635 	 */
636 
637 	if (optname == SO_BINDTODEVICE)
638 		return sock_setbindtodevice(sk, optval, optlen);
639 
640 	if (optlen < sizeof(int))
641 		return -EINVAL;
642 
643 	if (get_user(val, (int __user *)optval))
644 		return -EFAULT;
645 
646 	valbool = val ? 1 : 0;
647 
648 	lock_sock(sk);
649 
650 	switch (optname) {
651 	case SO_DEBUG:
652 		if (val && !capable(CAP_NET_ADMIN))
653 			ret = -EACCES;
654 		else
655 			sock_valbool_flag(sk, SOCK_DBG, valbool);
656 		break;
657 	case SO_REUSEADDR:
658 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
659 		break;
660 	case SO_REUSEPORT:
661 		sk->sk_reuseport = valbool;
662 		break;
663 	case SO_TYPE:
664 	case SO_PROTOCOL:
665 	case SO_DOMAIN:
666 	case SO_ERROR:
667 		ret = -ENOPROTOOPT;
668 		break;
669 	case SO_DONTROUTE:
670 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
671 		break;
672 	case SO_BROADCAST:
673 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
674 		break;
675 	case SO_SNDBUF:
676 		/* Don't return an error here; BSD doesn't, and if you
677 		 * think about it, this is right. Otherwise apps have to
678 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
679 		 * are treated in BSD as hints.
680 		 */
681 		val = min_t(u32, val, sysctl_wmem_max);
682 set_sndbuf:
683 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
684 		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
685 		/* Wake up sending tasks if we upped the value. */
686 		sk->sk_write_space(sk);
687 		break;
688 
689 	case SO_SNDBUFFORCE:
690 		if (!capable(CAP_NET_ADMIN)) {
691 			ret = -EPERM;
692 			break;
693 		}
694 		goto set_sndbuf;
695 
696 	case SO_RCVBUF:
697 		/* Don't return an error here; BSD doesn't, and if you
698 		 * think about it, this is right. Otherwise apps have to
699 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
700 		 * are treated in BSD as hints.
701 		 */
702 		val = min_t(u32, val, sysctl_rmem_max);
703 set_rcvbuf:
704 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
705 		/*
706 		 * We double it on the way in to account for
707 		 * "struct sk_buff" etc. overhead.   Applications
708 		 * assume that the SO_RCVBUF setting they make will
709 		 * allow that much actual data to be received on that
710 		 * socket.
711 		 *
712 		 * Applications are unaware that "struct sk_buff" and
713 		 * other overheads allocate from the receive buffer
714 		 * during socket buffer allocation.
715 		 *
716 		 * And after considering the possible alternatives,
717 		 * returning the value we actually used in getsockopt
718 		 * is the most desirable behavior.
719 		 */
720 		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
721 		break;
722 
723 	case SO_RCVBUFFORCE:
724 		if (!capable(CAP_NET_ADMIN)) {
725 			ret = -EPERM;
726 			break;
727 		}
728 		goto set_rcvbuf;
729 
730 	case SO_KEEPALIVE:
731 #ifdef CONFIG_INET
732 		if (sk->sk_protocol == IPPROTO_TCP &&
733 		    sk->sk_type == SOCK_STREAM)
734 			tcp_set_keepalive(sk, valbool);
735 #endif
736 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
737 		break;
738 
739 	case SO_OOBINLINE:
740 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
741 		break;
742 
743 	case SO_NO_CHECK:
744 		sk->sk_no_check = valbool;
745 		break;
746 
747 	case SO_PRIORITY:
748 		if ((val >= 0 && val <= 6) ||
749 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
750 			sk->sk_priority = val;
751 		else
752 			ret = -EPERM;
753 		break;
754 
755 	case SO_LINGER:
756 		if (optlen < sizeof(ling)) {
757 			ret = -EINVAL;	/* 1003.1g */
758 			break;
759 		}
760 		if (copy_from_user(&ling, optval, sizeof(ling))) {
761 			ret = -EFAULT;
762 			break;
763 		}
764 		if (!ling.l_onoff)
765 			sock_reset_flag(sk, SOCK_LINGER);
766 		else {
767 #if (BITS_PER_LONG == 32)
768 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
769 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
770 			else
771 #endif
772 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
773 			sock_set_flag(sk, SOCK_LINGER);
774 		}
775 		break;
776 
777 	case SO_BSDCOMPAT:
778 		sock_warn_obsolete_bsdism("setsockopt");
779 		break;
780 
781 	case SO_PASSCRED:
782 		if (valbool)
783 			set_bit(SOCK_PASSCRED, &sock->flags);
784 		else
785 			clear_bit(SOCK_PASSCRED, &sock->flags);
786 		break;
787 
788 	case SO_TIMESTAMP:
789 	case SO_TIMESTAMPNS:
790 		if (valbool)  {
791 			if (optname == SO_TIMESTAMP)
792 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
793 			else
794 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
795 			sock_set_flag(sk, SOCK_RCVTSTAMP);
796 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
797 		} else {
798 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
799 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
800 		}
801 		break;
802 
803 	case SO_TIMESTAMPING:
804 		if (val & ~SOF_TIMESTAMPING_MASK) {
805 			ret = -EINVAL;
806 			break;
807 		}
808 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
809 				  val & SOF_TIMESTAMPING_TX_HARDWARE);
810 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
811 				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
812 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
813 				  val & SOF_TIMESTAMPING_RX_HARDWARE);
814 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
815 			sock_enable_timestamp(sk,
816 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
817 		else
818 			sock_disable_timestamp(sk,
819 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
820 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
821 				  val & SOF_TIMESTAMPING_SOFTWARE);
822 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
823 				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
824 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
825 				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
826 		break;
827 
828 	case SO_RCVLOWAT:
829 		if (val < 0)
830 			val = INT_MAX;
831 		sk->sk_rcvlowat = val ? : 1;
832 		break;
833 
834 	case SO_RCVTIMEO:
835 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
836 		break;
837 
838 	case SO_SNDTIMEO:
839 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
840 		break;
841 
842 	case SO_ATTACH_FILTER:
843 		ret = -EINVAL;
844 		if (optlen == sizeof(struct sock_fprog)) {
845 			struct sock_fprog fprog;
846 
847 			ret = -EFAULT;
848 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
849 				break;
850 
851 			ret = sk_attach_filter(&fprog, sk);
852 		}
853 		break;
854 
855 	case SO_DETACH_FILTER:
856 		ret = sk_detach_filter(sk);
857 		break;
858 
859 	case SO_LOCK_FILTER:
860 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
861 			ret = -EPERM;
862 		else
863 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
864 		break;
865 
866 	case SO_PASSSEC:
867 		if (valbool)
868 			set_bit(SOCK_PASSSEC, &sock->flags);
869 		else
870 			clear_bit(SOCK_PASSSEC, &sock->flags);
871 		break;
872 	case SO_MARK:
873 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
874 			ret = -EPERM;
875 		else
876 			sk->sk_mark = val;
877 		break;
878 
879 		/* We implement the SO_SNDLOWAT etc. options as
880 		   not settable (1003.1g 5.3). */
881 	case SO_RXQ_OVFL:
882 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
883 		break;
884 
885 	case SO_WIFI_STATUS:
886 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
887 		break;
888 
889 	case SO_PEEK_OFF:
890 		if (sock->ops->set_peek_off)
891 			sock->ops->set_peek_off(sk, val);
892 		else
893 			ret = -EOPNOTSUPP;
894 		break;
895 
896 	case SO_NOFCS:
897 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
898 		break;
899 
900 	case SO_SELECT_ERR_QUEUE:
901 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
902 		break;
903 
904 #ifdef CONFIG_NET_LL_RX_POLL
905 	case SO_BUSY_POLL:
906 		/* allow unprivileged users to decrease the value */
907 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
908 			ret = -EPERM;
909 		else {
910 			if (val < 0)
911 				ret = -EINVAL;
912 			else
913 				sk->sk_ll_usec = val;
914 		}
915 		break;
916 #endif
917 	default:
918 		ret = -ENOPROTOOPT;
919 		break;
920 	}
921 	release_sock(sk);
922 	return ret;
923 }
924 EXPORT_SYMBOL(sock_setsockopt);
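
/*
 * Buffer-sizing illustration (assuming sysctl_rmem_max is large enough):
 * because SO_SNDBUF/SO_RCVBUF values are doubled on the way in to cover
 * sk_buff overhead, a hypothetical
 *
 *	int val = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *
 * leaves sk->sk_rcvbuf at 131072, and a later getsockopt(SO_RCVBUF)
 * reports that doubled value.
 */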
925 
926 
927 void cred_to_ucred(struct pid *pid, const struct cred *cred,
928 		   struct ucred *ucred)
929 {
930 	ucred->pid = pid_vnr(pid);
931 	ucred->uid = ucred->gid = -1;
932 	if (cred) {
933 		struct user_namespace *current_ns = current_user_ns();
934 
935 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
936 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
937 	}
938 }
939 EXPORT_SYMBOL_GPL(cred_to_ucred);
940 
941 int sock_getsockopt(struct socket *sock, int level, int optname,
942 		    char __user *optval, int __user *optlen)
943 {
944 	struct sock *sk = sock->sk;
945 
946 	union {
947 		int val;
948 		struct linger ling;
949 		struct timeval tm;
950 	} v;
951 
952 	int lv = sizeof(int);
953 	int len;
954 
955 	if (get_user(len, optlen))
956 		return -EFAULT;
957 	if (len < 0)
958 		return -EINVAL;
959 
960 	memset(&v, 0, sizeof(v));
961 
962 	switch (optname) {
963 	case SO_DEBUG:
964 		v.val = sock_flag(sk, SOCK_DBG);
965 		break;
966 
967 	case SO_DONTROUTE:
968 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
969 		break;
970 
971 	case SO_BROADCAST:
972 		v.val = sock_flag(sk, SOCK_BROADCAST);
973 		break;
974 
975 	case SO_SNDBUF:
976 		v.val = sk->sk_sndbuf;
977 		break;
978 
979 	case SO_RCVBUF:
980 		v.val = sk->sk_rcvbuf;
981 		break;
982 
983 	case SO_REUSEADDR:
984 		v.val = sk->sk_reuse;
985 		break;
986 
987 	case SO_REUSEPORT:
988 		v.val = sk->sk_reuseport;
989 		break;
990 
991 	case SO_KEEPALIVE:
992 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
993 		break;
994 
995 	case SO_TYPE:
996 		v.val = sk->sk_type;
997 		break;
998 
999 	case SO_PROTOCOL:
1000 		v.val = sk->sk_protocol;
1001 		break;
1002 
1003 	case SO_DOMAIN:
1004 		v.val = sk->sk_family;
1005 		break;
1006 
1007 	case SO_ERROR:
1008 		v.val = -sock_error(sk);
1009 		if (v.val == 0)
1010 			v.val = xchg(&sk->sk_err_soft, 0);
1011 		break;
1012 
1013 	case SO_OOBINLINE:
1014 		v.val = sock_flag(sk, SOCK_URGINLINE);
1015 		break;
1016 
1017 	case SO_NO_CHECK:
1018 		v.val = sk->sk_no_check;
1019 		break;
1020 
1021 	case SO_PRIORITY:
1022 		v.val = sk->sk_priority;
1023 		break;
1024 
1025 	case SO_LINGER:
1026 		lv		= sizeof(v.ling);
1027 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1028 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1029 		break;
1030 
1031 	case SO_BSDCOMPAT:
1032 		sock_warn_obsolete_bsdism("getsockopt");
1033 		break;
1034 
1035 	case SO_TIMESTAMP:
1036 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1037 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1038 		break;
1039 
1040 	case SO_TIMESTAMPNS:
1041 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1042 		break;
1043 
1044 	case SO_TIMESTAMPING:
1045 		v.val = 0;
1046 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1047 			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1048 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1049 			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1050 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1051 			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1052 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1053 			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1054 		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1055 			v.val |= SOF_TIMESTAMPING_SOFTWARE;
1056 		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1057 			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1058 		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1059 			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1060 		break;
1061 
1062 	case SO_RCVTIMEO:
1063 		lv = sizeof(struct timeval);
1064 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1065 			v.tm.tv_sec = 0;
1066 			v.tm.tv_usec = 0;
1067 		} else {
1068 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1069 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1070 		}
1071 		break;
1072 
1073 	case SO_SNDTIMEO:
1074 		lv = sizeof(struct timeval);
1075 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1076 			v.tm.tv_sec = 0;
1077 			v.tm.tv_usec = 0;
1078 		} else {
1079 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1080 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1081 		}
1082 		break;
1083 
1084 	case SO_RCVLOWAT:
1085 		v.val = sk->sk_rcvlowat;
1086 		break;
1087 
1088 	case SO_SNDLOWAT:
1089 		v.val = 1;
1090 		break;
1091 
1092 	case SO_PASSCRED:
1093 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1094 		break;
1095 
1096 	case SO_PEERCRED:
1097 	{
1098 		struct ucred peercred;
1099 		if (len > sizeof(peercred))
1100 			len = sizeof(peercred);
1101 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1102 		if (copy_to_user(optval, &peercred, len))
1103 			return -EFAULT;
1104 		goto lenout;
1105 	}
1106 
1107 	case SO_PEERNAME:
1108 	{
1109 		char address[128];
1110 
1111 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1112 			return -ENOTCONN;
1113 		if (lv < len)
1114 			return -EINVAL;
1115 		if (copy_to_user(optval, address, len))
1116 			return -EFAULT;
1117 		goto lenout;
1118 	}
1119 
1120 	/* Dubious BSD thing... Probably nobody even uses it, but
1121 	 * the UNIX standard wants it for whatever reason... -DaveM
1122 	 */
1123 	case SO_ACCEPTCONN:
1124 		v.val = sk->sk_state == TCP_LISTEN;
1125 		break;
1126 
1127 	case SO_PASSSEC:
1128 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1129 		break;
1130 
1131 	case SO_PEERSEC:
1132 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1133 
1134 	case SO_MARK:
1135 		v.val = sk->sk_mark;
1136 		break;
1137 
1138 	case SO_RXQ_OVFL:
1139 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1140 		break;
1141 
1142 	case SO_WIFI_STATUS:
1143 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1144 		break;
1145 
1146 	case SO_PEEK_OFF:
1147 		if (!sock->ops->set_peek_off)
1148 			return -EOPNOTSUPP;
1149 
1150 		v.val = sk->sk_peek_off;
1151 		break;
1152 	case SO_NOFCS:
1153 		v.val = sock_flag(sk, SOCK_NOFCS);
1154 		break;
1155 
1156 	case SO_BINDTODEVICE:
1157 		return sock_getbindtodevice(sk, optval, optlen, len);
1158 
1159 	case SO_GET_FILTER:
1160 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1161 		if (len < 0)
1162 			return len;
1163 
1164 		goto lenout;
1165 
1166 	case SO_LOCK_FILTER:
1167 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1168 		break;
1169 
1170 	case SO_SELECT_ERR_QUEUE:
1171 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1172 		break;
1173 
1174 #ifdef CONFIG_NET_LL_RX_POLL
1175 	case SO_BUSY_POLL:
1176 		v.val = sk->sk_ll_usec;
1177 		break;
1178 #endif
1179 
1180 	default:
1181 		return -ENOPROTOOPT;
1182 	}
1183 
1184 	if (len > lv)
1185 		len = lv;
1186 	if (copy_to_user(optval, &v, len))
1187 		return -EFAULT;
1188 lenout:
1189 	if (put_user(len, optlen))
1190 		return -EFAULT;
1191 	return 0;
1192 }
1193 
1194 /*
1195  * Initialize an sk_lock.
1196  *
1197  * (We also register the sk_lock with the lock validator.)
1198  */
1199 static inline void sock_lock_init(struct sock *sk)
1200 {
1201 	sock_lock_init_class_and_name(sk,
1202 			af_family_slock_key_strings[sk->sk_family],
1203 			af_family_slock_keys + sk->sk_family,
1204 			af_family_key_strings[sk->sk_family],
1205 			af_family_keys + sk->sk_family);
1206 }
1207 
1208 /*
1209  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1210  * even temporarly, because of RCU lookups. sk_node should also be left as is.
1211  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1212  */
1213 static void sock_copy(struct sock *nsk, const struct sock *osk)
1214 {
1215 #ifdef CONFIG_SECURITY_NETWORK
1216 	void *sptr = nsk->sk_security;
1217 #endif
1218 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1219 
1220 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1221 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1222 
1223 #ifdef CONFIG_SECURITY_NETWORK
1224 	nsk->sk_security = sptr;
1225 	security_sk_clone(osk, nsk);
1226 #endif
1227 }
1228 
1229 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1230 {
1231 	unsigned long nulls1, nulls2;
1232 
1233 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1234 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1235 	if (nulls1 > nulls2)
1236 		swap(nulls1, nulls2);
1237 
1238 	if (nulls1 != 0)
1239 		memset((char *)sk, 0, nulls1);
1240 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1241 	       nulls2 - nulls1 - sizeof(void *));
1242 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1243 	       size - nulls2 - sizeof(void *));
1244 }
1245 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1246 
1247 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1248 		int family)
1249 {
1250 	struct sock *sk;
1251 	struct kmem_cache *slab;
1252 
1253 	slab = prot->slab;
1254 	if (slab != NULL) {
1255 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1256 		if (!sk)
1257 			return sk;
1258 		if (priority & __GFP_ZERO) {
1259 			if (prot->clear_sk)
1260 				prot->clear_sk(sk, prot->obj_size);
1261 			else
1262 				sk_prot_clear_nulls(sk, prot->obj_size);
1263 		}
1264 	} else
1265 		sk = kmalloc(prot->obj_size, priority);
1266 
1267 	if (sk != NULL) {
1268 		kmemcheck_annotate_bitfield(sk, flags);
1269 
1270 		if (security_sk_alloc(sk, family, priority))
1271 			goto out_free;
1272 
1273 		if (!try_module_get(prot->owner))
1274 			goto out_free_sec;
1275 		sk_tx_queue_clear(sk);
1276 	}
1277 
1278 	return sk;
1279 
1280 out_free_sec:
1281 	security_sk_free(sk);
1282 out_free:
1283 	if (slab != NULL)
1284 		kmem_cache_free(slab, sk);
1285 	else
1286 		kfree(sk);
1287 	return NULL;
1288 }
1289 
1290 static void sk_prot_free(struct proto *prot, struct sock *sk)
1291 {
1292 	struct kmem_cache *slab;
1293 	struct module *owner;
1294 
1295 	owner = prot->owner;
1296 	slab = prot->slab;
1297 
1298 	security_sk_free(sk);
1299 	if (slab != NULL)
1300 		kmem_cache_free(slab, sk);
1301 	else
1302 		kfree(sk);
1303 	module_put(owner);
1304 }
1305 
1306 #if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
1307 void sock_update_classid(struct sock *sk)
1308 {
1309 	u32 classid;
1310 
1311 	classid = task_cls_classid(current);
1312 	if (classid != sk->sk_classid)
1313 		sk->sk_classid = classid;
1314 }
1315 EXPORT_SYMBOL(sock_update_classid);
1316 #endif
1317 
1318 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1319 void sock_update_netprioidx(struct sock *sk)
1320 {
1321 	if (in_interrupt())
1322 		return;
1323 
1324 	sk->sk_cgrp_prioidx = task_netprioidx(current);
1325 }
1326 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1327 #endif
1328 
1329 /**
1330  *	sk_alloc - All socket objects are allocated here
1331  *	@net: the applicable net namespace
1332  *	@family: protocol family
1333  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1334  *	@prot: struct proto associated with this new sock instance
1335  */
1336 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1337 		      struct proto *prot)
1338 {
1339 	struct sock *sk;
1340 
1341 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1342 	if (sk) {
1343 		sk->sk_family = family;
1344 		/*
1345 		 * See comment in struct sock definition to understand
1346 		 * why we need sk_prot_creator -acme
1347 		 */
1348 		sk->sk_prot = sk->sk_prot_creator = prot;
1349 		sock_lock_init(sk);
1350 		sock_net_set(sk, get_net(net));
1351 		atomic_set(&sk->sk_wmem_alloc, 1);
1352 
1353 		sock_update_classid(sk);
1354 		sock_update_netprioidx(sk);
1355 	}
1356 
1357 	return sk;
1358 }
1359 EXPORT_SYMBOL(sk_alloc);
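
/*
 * Illustration only: an address family's create() hook typically allocates
 * its sock along the lines of
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * where PF_EXAMPLE and example_proto stand in for the family's own
 * constants and struct proto.
 */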
1360 
1361 static void __sk_free(struct sock *sk)
1362 {
1363 	struct sk_filter *filter;
1364 
1365 	if (sk->sk_destruct)
1366 		sk->sk_destruct(sk);
1367 
1368 	filter = rcu_dereference_check(sk->sk_filter,
1369 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1370 	if (filter) {
1371 		sk_filter_uncharge(sk, filter);
1372 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1373 	}
1374 
1375 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1376 
1377 	if (atomic_read(&sk->sk_omem_alloc))
1378 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1379 			 __func__, atomic_read(&sk->sk_omem_alloc));
1380 
1381 	if (sk->sk_peer_cred)
1382 		put_cred(sk->sk_peer_cred);
1383 	put_pid(sk->sk_peer_pid);
1384 	put_net(sock_net(sk));
1385 	sk_prot_free(sk->sk_prot_creator, sk);
1386 }
1387 
1388 void sk_free(struct sock *sk)
1389 {
1390 	/*
1391 	 * We subtract one from sk_wmem_alloc so we can tell whether
1392 	 * some packets are still in some tx queue.
1393 	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
1394 	 */
1395 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1396 		__sk_free(sk);
1397 }
1398 EXPORT_SYMBOL(sk_free);
1399 
1400 /*
1401  * The last sock_put should drop the reference to sk->sk_net. It has already
1402  * been dropped in sk_change_net. Taking a reference to the stopping namespace
1403  * is not an option.
1404  * Take a reference to the socket to remove it from the hash while still
1405  * _alive_, and after that destroy it in the context of init_net.
1406  */
1407 void sk_release_kernel(struct sock *sk)
1408 {
1409 	if (sk == NULL || sk->sk_socket == NULL)
1410 		return;
1411 
1412 	sock_hold(sk);
1413 	sock_release(sk->sk_socket);
1414 	release_net(sock_net(sk));
1415 	sock_net_set(sk, get_net(&init_net));
1416 	sock_put(sk);
1417 }
1418 EXPORT_SYMBOL(sk_release_kernel);
1419 
1420 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1421 {
1422 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1423 		sock_update_memcg(newsk);
1424 }
1425 
1426 /**
1427  *	sk_clone_lock - clone a socket, and lock its clone
1428  *	@sk: the socket to clone
1429  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1430  *
1431  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1432  */
1433 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1434 {
1435 	struct sock *newsk;
1436 
1437 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1438 	if (newsk != NULL) {
1439 		struct sk_filter *filter;
1440 
1441 		sock_copy(newsk, sk);
1442 
1443 		/* SANITY */
1444 		get_net(sock_net(newsk));
1445 		sk_node_init(&newsk->sk_node);
1446 		sock_lock_init(newsk);
1447 		bh_lock_sock(newsk);
1448 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1449 		newsk->sk_backlog.len = 0;
1450 
1451 		atomic_set(&newsk->sk_rmem_alloc, 0);
1452 		/*
1453 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1454 		 */
1455 		atomic_set(&newsk->sk_wmem_alloc, 1);
1456 		atomic_set(&newsk->sk_omem_alloc, 0);
1457 		skb_queue_head_init(&newsk->sk_receive_queue);
1458 		skb_queue_head_init(&newsk->sk_write_queue);
1459 #ifdef CONFIG_NET_DMA
1460 		skb_queue_head_init(&newsk->sk_async_wait_queue);
1461 #endif
1462 
1463 		spin_lock_init(&newsk->sk_dst_lock);
1464 		rwlock_init(&newsk->sk_callback_lock);
1465 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1466 				af_callback_keys + newsk->sk_family,
1467 				af_family_clock_key_strings[newsk->sk_family]);
1468 
1469 		newsk->sk_dst_cache	= NULL;
1470 		newsk->sk_wmem_queued	= 0;
1471 		newsk->sk_forward_alloc = 0;
1472 		newsk->sk_send_head	= NULL;
1473 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1474 
1475 		sock_reset_flag(newsk, SOCK_DONE);
1476 		skb_queue_head_init(&newsk->sk_error_queue);
1477 
1478 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1479 		if (filter != NULL)
1480 			sk_filter_charge(newsk, filter);
1481 
1482 		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1483 			/* It is still a raw copy of the parent, so invalidate
1484 			 * the destructor and do a plain sk_free(). */
1485 			newsk->sk_destruct = NULL;
1486 			bh_unlock_sock(newsk);
1487 			sk_free(newsk);
1488 			newsk = NULL;
1489 			goto out;
1490 		}
1491 
1492 		newsk->sk_err	   = 0;
1493 		newsk->sk_priority = 0;
1494 		/*
1495 		 * Before updating sk_refcnt, we must commit prior changes to memory
1496 		 * (Documentation/RCU/rculist_nulls.txt for details)
1497 		 */
1498 		smp_wmb();
1499 		atomic_set(&newsk->sk_refcnt, 2);
1500 
1501 		/*
1502 		 * Increment the counter in the same struct proto as the master
1503 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1504 		 * is the same as sk->sk_prot->socks, as this field was copied
1505 		 * with memcpy).
1506 		 *
1507 		 * This _changes_ the previous behaviour, where
1508 		 * tcp_create_openreq_child always was incrementing the
1509  * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1510 		 * to be taken into account in all callers. -acme
1511 		 */
1512 		sk_refcnt_debug_inc(newsk);
1513 		sk_set_socket(newsk, NULL);
1514 		newsk->sk_wq = NULL;
1515 
1516 		sk_update_clone(sk, newsk);
1517 
1518 		if (newsk->sk_prot->sockets_allocated)
1519 			sk_sockets_allocated_inc(newsk);
1520 
1521 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1522 			net_enable_timestamp();
1523 	}
1524 out:
1525 	return newsk;
1526 }
1527 EXPORT_SYMBOL_GPL(sk_clone_lock);
1528 
1529 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1530 {
1531 	__sk_dst_set(sk, dst);
1532 	sk->sk_route_caps = dst->dev->features;
1533 	if (sk->sk_route_caps & NETIF_F_GSO)
1534 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1535 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1536 	if (sk_can_gso(sk)) {
1537 		if (dst->header_len) {
1538 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1539 		} else {
1540 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1541 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1542 			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1543 		}
1544 	}
1545 }
1546 EXPORT_SYMBOL_GPL(sk_setup_caps);
1547 
1548 /*
1549  *	Simple resource managers for sockets.
1550  */
1551 
1552 
1553 /*
1554  * Write buffer destructor automatically called from kfree_skb.
1555  */
1556 void sock_wfree(struct sk_buff *skb)
1557 {
1558 	struct sock *sk = skb->sk;
1559 	unsigned int len = skb->truesize;
1560 
1561 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1562 		/*
1563 		 * Keep a reference on sk_wmem_alloc; it will be released
1564 		 * after the sk_write_space() call.
1565 		 */
1566 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1567 		sk->sk_write_space(sk);
1568 		len = 1;
1569 	}
1570 	/*
1571 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1572 	 * could not do because of in-flight packets
1573 	 */
1574 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1575 		__sk_free(sk);
1576 }
1577 EXPORT_SYMBOL(sock_wfree);
1578 
1579 void skb_orphan_partial(struct sk_buff *skb)
1580 {
1581 	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1582 	 * so we do not completely orphan the skb, but transfer all
1583 	 * accounted bytes but one, to avoid unexpected reorders.
1584 	 */
1585 	if (skb->destructor == sock_wfree
1586 #ifdef CONFIG_INET
1587 	    || skb->destructor == tcp_wfree
1588 #endif
1589 		) {
1590 		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1591 		skb->truesize = 1;
1592 	} else {
1593 		skb_orphan(skb);
1594 	}
1595 }
1596 EXPORT_SYMBOL(skb_orphan_partial);
1597 
1598 /*
1599  * Read buffer destructor automatically called from kfree_skb.
1600  */
1601 void sock_rfree(struct sk_buff *skb)
1602 {
1603 	struct sock *sk = skb->sk;
1604 	unsigned int len = skb->truesize;
1605 
1606 	atomic_sub(len, &sk->sk_rmem_alloc);
1607 	sk_mem_uncharge(sk, len);
1608 }
1609 EXPORT_SYMBOL(sock_rfree);
1610 
1611 void sock_edemux(struct sk_buff *skb)
1612 {
1613 	struct sock *sk = skb->sk;
1614 
1615 #ifdef CONFIG_INET
1616 	if (sk->sk_state == TCP_TIME_WAIT)
1617 		inet_twsk_put(inet_twsk(sk));
1618 	else
1619 #endif
1620 		sock_put(sk);
1621 }
1622 EXPORT_SYMBOL(sock_edemux);
1623 
1624 kuid_t sock_i_uid(struct sock *sk)
1625 {
1626 	kuid_t uid;
1627 
1628 	read_lock_bh(&sk->sk_callback_lock);
1629 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1630 	read_unlock_bh(&sk->sk_callback_lock);
1631 	return uid;
1632 }
1633 EXPORT_SYMBOL(sock_i_uid);
1634 
1635 unsigned long sock_i_ino(struct sock *sk)
1636 {
1637 	unsigned long ino;
1638 
1639 	read_lock_bh(&sk->sk_callback_lock);
1640 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1641 	read_unlock_bh(&sk->sk_callback_lock);
1642 	return ino;
1643 }
1644 EXPORT_SYMBOL(sock_i_ino);
1645 
1646 /*
1647  * Allocate a skb from the socket's send buffer.
1648  */
1649 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1650 			     gfp_t priority)
1651 {
1652 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1653 		struct sk_buff *skb = alloc_skb(size, priority);
1654 		if (skb) {
1655 			skb_set_owner_w(skb, sk);
1656 			return skb;
1657 		}
1658 	}
1659 	return NULL;
1660 }
1661 EXPORT_SYMBOL(sock_wmalloc);
1662 
1663 /*
1664  * Allocate a skb from the socket's receive buffer.
1665  */
1666 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1667 			     gfp_t priority)
1668 {
1669 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1670 		struct sk_buff *skb = alloc_skb(size, priority);
1671 		if (skb) {
1672 			skb_set_owner_r(skb, sk);
1673 			return skb;
1674 		}
1675 	}
1676 	return NULL;
1677 }
1678 
1679 /*
1680  * Allocate a memory block from the socket's option memory buffer.
1681  */
1682 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1683 {
1684 	if ((unsigned int)size <= sysctl_optmem_max &&
1685 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1686 		void *mem;
1687 		/* First do the add, to avoid the race if kmalloc
1688 		 * might sleep.
1689 		 */
1690 		atomic_add(size, &sk->sk_omem_alloc);
1691 		mem = kmalloc(size, priority);
1692 		if (mem)
1693 			return mem;
1694 		atomic_sub(size, &sk->sk_omem_alloc);
1695 	}
1696 	return NULL;
1697 }
1698 EXPORT_SYMBOL(sock_kmalloc);
1699 
1700 /*
1701  * Free an option memory block.
1702  */
1703 void sock_kfree_s(struct sock *sk, void *mem, int size)
1704 {
1705 	kfree(mem);
1706 	atomic_sub(size, &sk->sk_omem_alloc);
1707 }
1708 EXPORT_SYMBOL(sock_kfree_s);
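
/*
 * The two helpers above pair up; a sketch of the usual pattern (names are
 * made up):
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);	(pass the same size back)
 *
 * so that sk_omem_alloc stays balanced against sysctl_optmem_max.
 */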
1709 
1710 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1711    I think these locks should be removed for datagram sockets.
1712  */
1713 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1714 {
1715 	DEFINE_WAIT(wait);
1716 
1717 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1718 	for (;;) {
1719 		if (!timeo)
1720 			break;
1721 		if (signal_pending(current))
1722 			break;
1723 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1724 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1725 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1726 			break;
1727 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1728 			break;
1729 		if (sk->sk_err)
1730 			break;
1731 		timeo = schedule_timeout(timeo);
1732 	}
1733 	finish_wait(sk_sleep(sk), &wait);
1734 	return timeo;
1735 }
1736 
1737 
1738 /*
1739  *	Generic send/receive buffer handlers
1740  */
1741 
1742 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1743 				     unsigned long data_len, int noblock,
1744 				     int *errcode)
1745 {
1746 	struct sk_buff *skb;
1747 	gfp_t gfp_mask;
1748 	long timeo;
1749 	int err;
1750 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1751 
1752 	err = -EMSGSIZE;
1753 	if (npages > MAX_SKB_FRAGS)
1754 		goto failure;
1755 
1756 	gfp_mask = sk->sk_allocation;
1757 	if (gfp_mask & __GFP_WAIT)
1758 		gfp_mask |= __GFP_REPEAT;
1759 
1760 	timeo = sock_sndtimeo(sk, noblock);
1761 	while (1) {
1762 		err = sock_error(sk);
1763 		if (err != 0)
1764 			goto failure;
1765 
1766 		err = -EPIPE;
1767 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1768 			goto failure;
1769 
1770 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1771 			skb = alloc_skb(header_len, gfp_mask);
1772 			if (skb) {
1773 				int i;
1774 
1775 				/* No pages, we're done... */
1776 				if (!data_len)
1777 					break;
1778 
1779 				skb->truesize += data_len;
1780 				skb_shinfo(skb)->nr_frags = npages;
1781 				for (i = 0; i < npages; i++) {
1782 					struct page *page;
1783 
1784 					page = alloc_pages(sk->sk_allocation, 0);
1785 					if (!page) {
1786 						err = -ENOBUFS;
1787 						skb_shinfo(skb)->nr_frags = i;
1788 						kfree_skb(skb);
1789 						goto failure;
1790 					}
1791 
1792 					__skb_fill_page_desc(skb, i,
1793 							page, 0,
1794 							(data_len >= PAGE_SIZE ?
1795 							 PAGE_SIZE :
1796 							 data_len));
1797 					data_len -= PAGE_SIZE;
1798 				}
1799 
1800 				/* Full success... */
1801 				break;
1802 			}
1803 			err = -ENOBUFS;
1804 			goto failure;
1805 		}
1806 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1807 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1808 		err = -EAGAIN;
1809 		if (!timeo)
1810 			goto failure;
1811 		if (signal_pending(current))
1812 			goto interrupted;
1813 		timeo = sock_wait_for_wmem(sk, timeo);
1814 	}
1815 
1816 	skb_set_owner_w(skb, sk);
1817 	return skb;
1818 
1819 interrupted:
1820 	err = sock_intr_errno(timeo);
1821 failure:
1822 	*errcode = err;
1823 	return NULL;
1824 }
1825 EXPORT_SYMBOL(sock_alloc_send_pskb);
1826 
1827 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1828 				    int noblock, int *errcode)
1829 {
1830 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1831 }
1832 EXPORT_SYMBOL(sock_alloc_send_skb);
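
/*
 * Hypothetical datagram send path using the helper above: reserve header
 * room and let it block (subject to the send timeout) until wmem is
 * available:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 *
 * which is roughly how datagram protocols obtain their skbs.
 */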
1833 
1834 /* On 32bit arches, an skb frag is limited to 2^15 */
1835 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1836 
1837 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1838 {
1839 	int order;
1840 
1841 	if (pfrag->page) {
1842 		if (atomic_read(&pfrag->page->_count) == 1) {
1843 			pfrag->offset = 0;
1844 			return true;
1845 		}
1846 		if (pfrag->offset < pfrag->size)
1847 			return true;
1848 		put_page(pfrag->page);
1849 	}
1850 
1851 	/* We restrict high order allocations to users that can afford to wait */
1852 	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1853 
1854 	do {
1855 		gfp_t gfp = sk->sk_allocation;
1856 
1857 		if (order)
1858 			gfp |= __GFP_COMP | __GFP_NOWARN;
1859 		pfrag->page = alloc_pages(gfp, order);
1860 		if (likely(pfrag->page)) {
1861 			pfrag->offset = 0;
1862 			pfrag->size = PAGE_SIZE << order;
1863 			return true;
1864 		}
1865 	} while (--order >= 0);
1866 
1867 	sk_enter_memory_pressure(sk);
1868 	sk_stream_moderate_sndbuf(sk);
1869 	return false;
1870 }
1871 EXPORT_SYMBOL(sk_page_frag_refill);
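
/*
 * Callers usually combine this with sk_page_frag(); a rough sketch:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	(copy up to pfrag->size - pfrag->offset bytes at pfrag->offset,
 *	 then advance pfrag->offset by the amount actually used)
 */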
1872 
1873 static void __lock_sock(struct sock *sk)
1874 	__releases(&sk->sk_lock.slock)
1875 	__acquires(&sk->sk_lock.slock)
1876 {
1877 	DEFINE_WAIT(wait);
1878 
1879 	for (;;) {
1880 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1881 					TASK_UNINTERRUPTIBLE);
1882 		spin_unlock_bh(&sk->sk_lock.slock);
1883 		schedule();
1884 		spin_lock_bh(&sk->sk_lock.slock);
1885 		if (!sock_owned_by_user(sk))
1886 			break;
1887 	}
1888 	finish_wait(&sk->sk_lock.wq, &wait);
1889 }
1890 
1891 static void __release_sock(struct sock *sk)
1892 	__releases(&sk->sk_lock.slock)
1893 	__acquires(&sk->sk_lock.slock)
1894 {
1895 	struct sk_buff *skb = sk->sk_backlog.head;
1896 
1897 	do {
1898 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1899 		bh_unlock_sock(sk);
1900 
1901 		do {
1902 			struct sk_buff *next = skb->next;
1903 
1904 			prefetch(next);
1905 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1906 			skb->next = NULL;
1907 			sk_backlog_rcv(sk, skb);
1908 
1909 			/*
1910 			 * We are in process context here with softirqs
1911 			 * disabled, so use cond_resched_softirq() to allow
1912 			 * preemption. This is safe to do because we've taken
1913 			 * the backlog queue private:
1914 			 */
1915 			cond_resched_softirq();
1916 
1917 			skb = next;
1918 		} while (skb != NULL);
1919 
1920 		bh_lock_sock(sk);
1921 	} while ((skb = sk->sk_backlog.head) != NULL);
1922 
1923 	/*
1924 	 * Doing the zeroing here guarantees we cannot loop forever
1925 	 * while a wild producer attempts to flood us.
1926 	 */
1927 	sk->sk_backlog.len = 0;
1928 }
1929 
1930 /**
1931  * sk_wait_data - wait for data to arrive at sk_receive_queue
1932  * @sk:    sock to wait on
1933  * @timeo: how long to wait for data to arrive
1934  *
1935  * Socket state, including sk->sk_err, is now changed only under the socket
1936  * lock, hence we may omit checks after joining the wait queue.
1937  * We check the receive queue before schedule() only as an optimization;
1938  * it is very likely that release_sock() added new data.
1939  */
1940 int sk_wait_data(struct sock *sk, long *timeo)
1941 {
1942 	int rc;
1943 	DEFINE_WAIT(wait);
1944 
1945 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1946 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1947 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1948 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1949 	finish_wait(sk_sleep(sk), &wait);
1950 	return rc;
1951 }
1952 EXPORT_SYMBOL(sk_wait_data);
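
/*
 * Usage sketch (illustrative only): callers hold the socket lock and loop
 * until data shows up or the timeout/signal fires, roughly:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (sk->sk_err || !timeo || signal_pending(current))
 *			break;		(handle error/-EAGAIN/-EINTR)
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 */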
1953 
1954 /**
1955  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1956  *	@sk: socket
1957  *	@size: memory size to allocate
1958  *	@kind: allocation type
1959  *
1960  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1961  *	rmem allocation. This function assumes that protocols which have
1962  *	memory_pressure use sk_wmem_queued for write buffer accounting.
1963  */
1964 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1965 {
1966 	struct proto *prot = sk->sk_prot;
1967 	int amt = sk_mem_pages(size);
1968 	long allocated;
1969 	int parent_status = UNDER_LIMIT;
1970 
1971 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1972 
1973 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1974 
1975 	/* Under limit. */
1976 	if (parent_status == UNDER_LIMIT &&
1977 			allocated <= sk_prot_mem_limits(sk, 0)) {
1978 		sk_leave_memory_pressure(sk);
1979 		return 1;
1980 	}
1981 
1982 	/* Under pressure. (we or our parents) */
1983 	if ((parent_status > SOFT_LIMIT) ||
1984 			allocated > sk_prot_mem_limits(sk, 1))
1985 		sk_enter_memory_pressure(sk);
1986 
1987 	/* Over hard limit (we or our parents) */
1988 	if ((parent_status == OVER_LIMIT) ||
1989 			(allocated > sk_prot_mem_limits(sk, 2)))
1990 		goto suppress_allocation;
1991 
1992 	/* guarantee minimum buffer size under pressure */
1993 	if (kind == SK_MEM_RECV) {
1994 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1995 			return 1;
1996 
1997 	} else { /* SK_MEM_SEND */
1998 		if (sk->sk_type == SOCK_STREAM) {
1999 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2000 				return 1;
2001 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2002 			   prot->sysctl_wmem[0])
2003 				return 1;
2004 	}
2005 
2006 	if (sk_has_memory_pressure(sk)) {
2007 		int alloc;
2008 
2009 		if (!sk_under_memory_pressure(sk))
2010 			return 1;
2011 		alloc = sk_sockets_allocated_read_positive(sk);
2012 		if (sk_prot_mem_limits(sk, 2) > alloc *
2013 		    sk_mem_pages(sk->sk_wmem_queued +
2014 				 atomic_read(&sk->sk_rmem_alloc) +
2015 				 sk->sk_forward_alloc))
2016 			return 1;
2017 	}
2018 
2019 suppress_allocation:
2020 
2021 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2022 		sk_stream_moderate_sndbuf(sk);
2023 
2024 		/* Fail only if the socket is _under_ its sndbuf.
2025 		 * In this case we cannot block, so we have to fail.
2026 		 */
2027 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2028 			return 1;
2029 	}
2030 
2031 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2032 
2033 	/* Alas. Undo changes. */
2034 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2035 
2036 	sk_memory_allocated_sub(sk, amt);
2037 
2038 	return 0;
2039 }
2040 EXPORT_SYMBOL(__sk_mem_schedule);
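
/*
 * Usage sketch (illustrative only): protocols normally charge memory through
 * the sk_wmem_schedule()/sk_rmem_schedule() helpers in include/net/sock.h,
 * which fall back to __sk_mem_schedule() only when sk_forward_alloc cannot
 * cover the request, roughly:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto drop;		(hypothetical out-of-memory path)
 *	skb_set_owner_w(skb, sk);
 */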
2041 
2042 /**
2043  *	__sk_mem_reclaim - reclaim memory_allocated
2044  *	@sk: socket
2045  */
2046 void __sk_mem_reclaim(struct sock *sk)
2047 {
2048 	sk_memory_allocated_sub(sk,
2049 				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2050 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2051 
2052 	if (sk_under_memory_pressure(sk) &&
2053 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2054 		sk_leave_memory_pressure(sk);
2055 }
2056 EXPORT_SYMBOL(__sk_mem_reclaim);
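
/*
 * Usage sketch (illustrative only): this is normally reached through the
 * sk_mem_reclaim() inline helper, which only calls __sk_mem_reclaim() once
 * at least one SK_MEM_QUANTUM of forward-allocated memory can be returned,
 * e.g. after purging a queue whose skb destructors uncharge the socket:
 *
 *	__skb_queue_purge(&sk->sk_receive_queue);
 *	sk_mem_reclaim(sk);
 */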
2057 
2058 
2059 /*
2060  * Set of default routines for initialising struct proto_ops when
2061  * the protocol does not support a particular function. In certain
2062  * cases where it makes no sense for a protocol to have a "do nothing"
2063  * function, some default processing is provided.
2064  */
2065 
2066 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2067 {
2068 	return -EOPNOTSUPP;
2069 }
2070 EXPORT_SYMBOL(sock_no_bind);
2071 
2072 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2073 		    int len, int flags)
2074 {
2075 	return -EOPNOTSUPP;
2076 }
2077 EXPORT_SYMBOL(sock_no_connect);
2078 
2079 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2080 {
2081 	return -EOPNOTSUPP;
2082 }
2083 EXPORT_SYMBOL(sock_no_socketpair);
2084 
2085 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2086 {
2087 	return -EOPNOTSUPP;
2088 }
2089 EXPORT_SYMBOL(sock_no_accept);
2090 
2091 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2092 		    int *len, int peer)
2093 {
2094 	return -EOPNOTSUPP;
2095 }
2096 EXPORT_SYMBOL(sock_no_getname);
2097 
2098 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2099 {
2100 	return 0;
2101 }
2102 EXPORT_SYMBOL(sock_no_poll);
2103 
2104 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2105 {
2106 	return -EOPNOTSUPP;
2107 }
2108 EXPORT_SYMBOL(sock_no_ioctl);
2109 
2110 int sock_no_listen(struct socket *sock, int backlog)
2111 {
2112 	return -EOPNOTSUPP;
2113 }
2114 EXPORT_SYMBOL(sock_no_listen);
2115 
2116 int sock_no_shutdown(struct socket *sock, int how)
2117 {
2118 	return -EOPNOTSUPP;
2119 }
2120 EXPORT_SYMBOL(sock_no_shutdown);
2121 
2122 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2123 		    char __user *optval, unsigned int optlen)
2124 {
2125 	return -EOPNOTSUPP;
2126 }
2127 EXPORT_SYMBOL(sock_no_setsockopt);
2128 
2129 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2130 		    char __user *optval, int __user *optlen)
2131 {
2132 	return -EOPNOTSUPP;
2133 }
2134 EXPORT_SYMBOL(sock_no_getsockopt);
2135 
2136 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2137 		    size_t len)
2138 {
2139 	return -EOPNOTSUPP;
2140 }
2141 EXPORT_SYMBOL(sock_no_sendmsg);
2142 
2143 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2144 		    size_t len, int flags)
2145 {
2146 	return -EOPNOTSUPP;
2147 }
2148 EXPORT_SYMBOL(sock_no_recvmsg);
2149 
2150 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2151 {
2152 	/* Mirror missing mmap method error code */
2153 	return -ENODEV;
2154 }
2155 EXPORT_SYMBOL(sock_no_mmap);
2156 
2157 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2158 {
2159 	ssize_t res;
2160 	struct msghdr msg = {.msg_flags = flags};
2161 	struct kvec iov;
2162 	char *kaddr = kmap(page);
2163 	iov.iov_base = kaddr + offset;
2164 	iov.iov_len = size;
2165 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2166 	kunmap(page);
2167 	return res;
2168 }
2169 EXPORT_SYMBOL(sock_no_sendpage);
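
/*
 * Usage sketch (illustrative only): a protocol family wires these stubs into
 * its struct proto_ops for operations it does not implement.  The names
 * my_dgram_ops, my_bind and PF_MYPROTO below are hypothetical.
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.bind		= my_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *		...
 *	};
 */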
2170 
2171 /*
2172  *	Default Socket Callbacks
2173  */
2174 
2175 static void sock_def_wakeup(struct sock *sk)
2176 {
2177 	struct socket_wq *wq;
2178 
2179 	rcu_read_lock();
2180 	wq = rcu_dereference(sk->sk_wq);
2181 	if (wq_has_sleeper(wq))
2182 		wake_up_interruptible_all(&wq->wait);
2183 	rcu_read_unlock();
2184 }
2185 
2186 static void sock_def_error_report(struct sock *sk)
2187 {
2188 	struct socket_wq *wq;
2189 
2190 	rcu_read_lock();
2191 	wq = rcu_dereference(sk->sk_wq);
2192 	if (wq_has_sleeper(wq))
2193 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2194 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2195 	rcu_read_unlock();
2196 }
2197 
2198 static void sock_def_readable(struct sock *sk, int len)
2199 {
2200 	struct socket_wq *wq;
2201 
2202 	rcu_read_lock();
2203 	wq = rcu_dereference(sk->sk_wq);
2204 	if (wq_has_sleeper(wq))
2205 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2206 						POLLRDNORM | POLLRDBAND);
2207 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2208 	rcu_read_unlock();
2209 }
2210 
2211 static void sock_def_write_space(struct sock *sk)
2212 {
2213 	struct socket_wq *wq;
2214 
2215 	rcu_read_lock();
2216 
2217 	/* Do not wake up a writer until he can make "significant"
2218 	 * progress.  --DaveM
2219 	 */
2220 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2221 		wq = rcu_dereference(sk->sk_wq);
2222 		if (wq_has_sleeper(wq))
2223 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2224 						POLLWRNORM | POLLWRBAND);
2225 
2226 		/* Should agree with poll, otherwise some programs break */
2227 		if (sock_writeable(sk))
2228 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2229 	}
2230 
2231 	rcu_read_unlock();
2232 }
2233 
2234 static void sock_def_destruct(struct sock *sk)
2235 {
2236 	kfree(sk->sk_protinfo);
2237 }
2238 
2239 void sk_send_sigurg(struct sock *sk)
2240 {
2241 	if (sk->sk_socket && sk->sk_socket->file)
2242 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2243 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2244 }
2245 EXPORT_SYMBOL(sk_send_sigurg);
2246 
2247 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2248 		    unsigned long expires)
2249 {
2250 	if (!mod_timer(timer, expires))
2251 		sock_hold(sk);
2252 }
2253 EXPORT_SYMBOL(sk_reset_timer);
2254 
2255 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2256 {
2257 	if (del_timer(timer))
2258 		__sock_put(sk);
2259 }
2260 EXPORT_SYMBOL(sk_stop_timer);
2261 
2262 void sock_init_data(struct socket *sock, struct sock *sk)
2263 {
2264 	skb_queue_head_init(&sk->sk_receive_queue);
2265 	skb_queue_head_init(&sk->sk_write_queue);
2266 	skb_queue_head_init(&sk->sk_error_queue);
2267 #ifdef CONFIG_NET_DMA
2268 	skb_queue_head_init(&sk->sk_async_wait_queue);
2269 #endif
2270 
2271 	sk->sk_send_head	=	NULL;
2272 
2273 	init_timer(&sk->sk_timer);
2274 
2275 	sk->sk_allocation	=	GFP_KERNEL;
2276 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2277 	sk->sk_sndbuf		=	sysctl_wmem_default;
2278 	sk->sk_state		=	TCP_CLOSE;
2279 	sk_set_socket(sk, sock);
2280 
2281 	sock_set_flag(sk, SOCK_ZAPPED);
2282 
2283 	if (sock) {
2284 		sk->sk_type	=	sock->type;
2285 		sk->sk_wq	=	sock->wq;
2286 		sock->sk	=	sk;
2287 	} else
2288 		sk->sk_wq	=	NULL;
2289 
2290 	spin_lock_init(&sk->sk_dst_lock);
2291 	rwlock_init(&sk->sk_callback_lock);
2292 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2293 			af_callback_keys + sk->sk_family,
2294 			af_family_clock_key_strings[sk->sk_family]);
2295 
2296 	sk->sk_state_change	=	sock_def_wakeup;
2297 	sk->sk_data_ready	=	sock_def_readable;
2298 	sk->sk_write_space	=	sock_def_write_space;
2299 	sk->sk_error_report	=	sock_def_error_report;
2300 	sk->sk_destruct		=	sock_def_destruct;
2301 
2302 	sk->sk_frag.page	=	NULL;
2303 	sk->sk_frag.offset	=	0;
2304 	sk->sk_peek_off		=	-1;
2305 
2306 	sk->sk_peer_pid 	=	NULL;
2307 	sk->sk_peer_cred	=	NULL;
2308 	sk->sk_write_pending	=	0;
2309 	sk->sk_rcvlowat		=	1;
2310 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2311 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2312 
2313 	sk->sk_stamp = ktime_set(-1L, 0);
2314 
2315 #ifdef CONFIG_NET_LL_RX_POLL
2316 	sk->sk_napi_id		=	0;
2317 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2318 #endif
2319 
2320 	/*
2321 	 * Before updating sk_refcnt, we must commit prior changes to memory
2322 	 * (Documentation/RCU/rculist_nulls.txt for details)
2323 	 */
2324 	smp_wmb();
2325 	atomic_set(&sk->sk_refcnt, 1);
2326 	atomic_set(&sk->sk_drops, 0);
2327 }
2328 EXPORT_SYMBOL(sock_init_data);
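
/*
 * Usage sketch (illustrative only): a protocol's init path calls
 * sock_init_data() first and may then override the default callbacks set
 * above.  The my_proto_* names are hypothetical.
 *
 *	sock_init_data(sock, sk);
 *	sk->sk_data_ready	= my_proto_data_ready;
 *	sk->sk_write_space	= my_proto_write_space;
 *	sk->sk_destruct		= my_proto_destruct;
 */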
2329 
2330 void lock_sock_nested(struct sock *sk, int subclass)
2331 {
2332 	might_sleep();
2333 	spin_lock_bh(&sk->sk_lock.slock);
2334 	if (sk->sk_lock.owned)
2335 		__lock_sock(sk);
2336 	sk->sk_lock.owned = 1;
2337 	spin_unlock(&sk->sk_lock.slock);
2338 	/*
2339 	 * The sk_lock has mutex_lock() semantics here:
2340 	 */
2341 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2342 	local_bh_enable();
2343 }
2344 EXPORT_SYMBOL(lock_sock_nested);
2345 
2346 void release_sock(struct sock *sk)
2347 {
2348 	/*
2349 	 * The sk_lock has mutex_unlock() semantics:
2350 	 */
2351 	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2352 
2353 	spin_lock_bh(&sk->sk_lock.slock);
2354 	if (sk->sk_backlog.tail)
2355 		__release_sock(sk);
2356 
2357 	if (sk->sk_prot->release_cb)
2358 		sk->sk_prot->release_cb(sk);
2359 
2360 	sk->sk_lock.owned = 0;
2361 	if (waitqueue_active(&sk->sk_lock.wq))
2362 		wake_up(&sk->sk_lock.wq);
2363 	spin_unlock_bh(&sk->sk_lock.slock);
2364 }
2365 EXPORT_SYMBOL(release_sock);
2366 
2367 /**
2368  * lock_sock_fast - fast version of lock_sock
2369  * @sk: socket
2370  *
2371  * This version should be used for very small sections, where the process won't block.
2372  * Returns false if the fast path was taken:
2373  *   sk_lock.slock locked, owned = 0, BH disabled
2374  * Returns true if the slow path was taken:
2375  *   sk_lock.slock unlocked, owned = 1, BH enabled
2376  */
2377 bool lock_sock_fast(struct sock *sk)
2378 {
2379 	might_sleep();
2380 	spin_lock_bh(&sk->sk_lock.slock);
2381 
2382 	if (!sk->sk_lock.owned)
2383 		/*
2384 		 * Note: fast path, we return with sk_lock.slock held and BH disabled
2385 		 */
2386 		return false;
2387 
2388 	__lock_sock(sk);
2389 	sk->sk_lock.owned = 1;
2390 	spin_unlock(&sk->sk_lock.slock);
2391 	/*
2392 	 * The sk_lock has mutex_lock() semantics here:
2393 	 */
2394 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2395 	local_bh_enable();
2396 	return true;
2397 }
2398 EXPORT_SYMBOL(lock_sock_fast);
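
/*
 * Usage sketch (illustrative only): pair with unlock_sock_fast() from
 * include/net/sock.h so the matching unlock follows whichever path the
 * lock took, roughly:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	(short, non-blocking critical section)
 *	unlock_sock_fast(sk, slow);
 */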
2399 
2400 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2401 {
2402 	struct timeval tv;
2403 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2404 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2405 	tv = ktime_to_timeval(sk->sk_stamp);
2406 	if (tv.tv_sec == -1)
2407 		return -ENOENT;
2408 	if (tv.tv_sec == 0) {
2409 		sk->sk_stamp = ktime_get_real();
2410 		tv = ktime_to_timeval(sk->sk_stamp);
2411 	}
2412 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2413 }
2414 EXPORT_SYMBOL(sock_get_timestamp);
2415 
2416 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2417 {
2418 	struct timespec ts;
2419 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2420 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2421 	ts = ktime_to_timespec(sk->sk_stamp);
2422 	if (ts.tv_sec == -1)
2423 		return -ENOENT;
2424 	if (ts.tv_sec == 0) {
2425 		sk->sk_stamp = ktime_get_real();
2426 		ts = ktime_to_timespec(sk->sk_stamp);
2427 	}
2428 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2429 }
2430 EXPORT_SYMBOL(sock_get_timestampns);
2431 
2432 void sock_enable_timestamp(struct sock *sk, int flag)
2433 {
2434 	if (!sock_flag(sk, flag)) {
2435 		unsigned long previous_flags = sk->sk_flags;
2436 
2437 		sock_set_flag(sk, flag);
2438 		/*
2439 		 * we just set one of the two flags which require net
2440 		 * time stamping, but time stamping might have been on
2441 		 * already because of the other one
2442 		 */
2443 		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2444 			net_enable_timestamp();
2445 	}
2446 }
2447 
2448 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2449 		       int level, int type)
2450 {
2451 	struct sock_exterr_skb *serr;
2452 	struct sk_buff *skb, *skb2;
2453 	int copied, err;
2454 
2455 	err = -EAGAIN;
2456 	skb = skb_dequeue(&sk->sk_error_queue);
2457 	if (skb == NULL)
2458 		goto out;
2459 
2460 	copied = skb->len;
2461 	if (copied > len) {
2462 		msg->msg_flags |= MSG_TRUNC;
2463 		copied = len;
2464 	}
2465 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2466 	if (err)
2467 		goto out_free_skb;
2468 
2469 	sock_recv_timestamp(msg, sk, skb);
2470 
2471 	serr = SKB_EXT_ERR(skb);
2472 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2473 
2474 	msg->msg_flags |= MSG_ERRQUEUE;
2475 	err = copied;
2476 
2477 	/* Reset and regenerate socket error */
2478 	spin_lock_bh(&sk->sk_error_queue.lock);
2479 	sk->sk_err = 0;
2480 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2481 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2482 		spin_unlock_bh(&sk->sk_error_queue.lock);
2483 		sk->sk_error_report(sk);
2484 	} else
2485 		spin_unlock_bh(&sk->sk_error_queue.lock);
2486 
2487 out_free_skb:
2488 	kfree_skb(skb);
2489 out:
2490 	return err;
2491 }
2492 EXPORT_SYMBOL(sock_recv_errqueue);
2493 
2494 /*
2495  *	Get a socket option on a socket.
2496  *
2497  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2498  *	asynchronous errors should be reported by getsockopt. We assume
2499  *	this means if you specify SO_ERROR (otherwise what's the point of it?).
2500  */
2501 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2502 			   char __user *optval, int __user *optlen)
2503 {
2504 	struct sock *sk = sock->sk;
2505 
2506 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2507 }
2508 EXPORT_SYMBOL(sock_common_getsockopt);
2509 
2510 #ifdef CONFIG_COMPAT
2511 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2512 				  char __user *optval, int __user *optlen)
2513 {
2514 	struct sock *sk = sock->sk;
2515 
2516 	if (sk->sk_prot->compat_getsockopt != NULL)
2517 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2518 						      optval, optlen);
2519 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2520 }
2521 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2522 #endif
2523 
2524 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2525 			struct msghdr *msg, size_t size, int flags)
2526 {
2527 	struct sock *sk = sock->sk;
2528 	int addr_len = 0;
2529 	int err;
2530 
2531 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2532 				   flags & ~MSG_DONTWAIT, &addr_len);
2533 	if (err >= 0)
2534 		msg->msg_namelen = addr_len;
2535 	return err;
2536 }
2537 EXPORT_SYMBOL(sock_common_recvmsg);
2538 
2539 /*
2540  *	Set socket options on an inet socket.
2541  */
2542 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2543 			   char __user *optval, unsigned int optlen)
2544 {
2545 	struct sock *sk = sock->sk;
2546 
2547 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2548 }
2549 EXPORT_SYMBOL(sock_common_setsockopt);
2550 
2551 #ifdef CONFIG_COMPAT
2552 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2553 				  char __user *optval, unsigned int optlen)
2554 {
2555 	struct sock *sk = sock->sk;
2556 
2557 	if (sk->sk_prot->compat_setsockopt != NULL)
2558 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2559 						      optval, optlen);
2560 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2561 }
2562 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2563 #endif
2564 
2565 void sk_common_release(struct sock *sk)
2566 {
2567 	if (sk->sk_prot->destroy)
2568 		sk->sk_prot->destroy(sk);
2569 
2570 	/*
2571 	 * Observation: when sk_common_release() is called, processes have
2572 	 * no access to the socket any more, but the network stack still does.
2573 	 * Step one, detach it from networking:
2574 	 *
2575 	 * A. Remove from hash tables.
2576 	 */
2577 
2578 	sk->sk_prot->unhash(sk);
2579 
2580 	/*
2581 	 * At this point the socket cannot receive new packets, but some may
2582 	 * still be in flight because some CPU ran the receiver and did its
2583 	 * hash table lookup before we unhashed the socket. They will reach
2584 	 * the receive queue and be purged by the socket destructor.
2585 	 *
2586 	 * We also still have packets pending on the receive queue and,
2587 	 * probably, our own packets waiting in device queues. The destructor
2588 	 * will drain the receive queue, but transmitted packets will delay
2589 	 * socket destruction until the last reference is released.
2590 	 */
2591 
2592 	sock_orphan(sk);
2593 
2594 	xfrm_sk_free_policy(sk);
2595 
2596 	sk_refcnt_debug_release(sk);
2597 
2598 	if (sk->sk_frag.page) {
2599 		put_page(sk->sk_frag.page);
2600 		sk->sk_frag.page = NULL;
2601 	}
2602 
2603 	sock_put(sk);
2604 }
2605 EXPORT_SYMBOL(sk_common_release);
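
/*
 * Usage sketch (illustrative only): connectionless protocols typically call
 * this from their close/destroy path once protocol state has been torn down.
 * my_proto_close is a hypothetical name.
 *
 *	static void my_proto_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 */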
2606 
2607 #ifdef CONFIG_PROC_FS
2608 #define PROTO_INUSE_NR	64	/* should be enough for now */
2609 struct prot_inuse {
2610 	int val[PROTO_INUSE_NR];
2611 };
2612 
2613 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2614 
2615 #ifdef CONFIG_NET_NS
2616 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2617 {
2618 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2619 }
2620 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2621 
2622 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2623 {
2624 	int cpu, idx = prot->inuse_idx;
2625 	int res = 0;
2626 
2627 	for_each_possible_cpu(cpu)
2628 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2629 
2630 	return res >= 0 ? res : 0;
2631 }
2632 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2633 
2634 static int __net_init sock_inuse_init_net(struct net *net)
2635 {
2636 	net->core.inuse = alloc_percpu(struct prot_inuse);
2637 	return net->core.inuse ? 0 : -ENOMEM;
2638 }
2639 
2640 static void __net_exit sock_inuse_exit_net(struct net *net)
2641 {
2642 	free_percpu(net->core.inuse);
2643 }
2644 
2645 static struct pernet_operations net_inuse_ops = {
2646 	.init = sock_inuse_init_net,
2647 	.exit = sock_inuse_exit_net,
2648 };
2649 
2650 static __init int net_inuse_init(void)
2651 {
2652 	if (register_pernet_subsys(&net_inuse_ops))
2653 		panic("Cannot initialize net inuse counters");
2654 
2655 	return 0;
2656 }
2657 
2658 core_initcall(net_inuse_init);
2659 #else
2660 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2661 
2662 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2663 {
2664 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2665 }
2666 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2667 
2668 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2669 {
2670 	int cpu, idx = prot->inuse_idx;
2671 	int res = 0;
2672 
2673 	for_each_possible_cpu(cpu)
2674 		res += per_cpu(prot_inuse, cpu).val[idx];
2675 
2676 	return res >= 0 ? res : 0;
2677 }
2678 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2679 #endif
2680 
2681 static void assign_proto_idx(struct proto *prot)
2682 {
2683 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2684 
2685 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2686 		pr_err("PROTO_INUSE_NR exhausted\n");
2687 		return;
2688 	}
2689 
2690 	set_bit(prot->inuse_idx, proto_inuse_idx);
2691 }
2692 
2693 static void release_proto_idx(struct proto *prot)
2694 {
2695 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2696 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2697 }
2698 #else
2699 static inline void assign_proto_idx(struct proto *prot)
2700 {
2701 }
2702 
2703 static inline void release_proto_idx(struct proto *prot)
2704 {
2705 }
2706 #endif
2707 
2708 int proto_register(struct proto *prot, int alloc_slab)
2709 {
2710 	if (alloc_slab) {
2711 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2712 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2713 					NULL);
2714 
2715 		if (prot->slab == NULL) {
2716 			pr_crit("%s: Can't create sock SLAB cache!\n",
2717 				prot->name);
2718 			goto out;
2719 		}
2720 
2721 		if (prot->rsk_prot != NULL) {
2722 			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2723 			if (prot->rsk_prot->slab_name == NULL)
2724 				goto out_free_sock_slab;
2725 
2726 			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2727 								 prot->rsk_prot->obj_size, 0,
2728 								 SLAB_HWCACHE_ALIGN, NULL);
2729 
2730 			if (prot->rsk_prot->slab == NULL) {
2731 				pr_crit("%s: Can't create request sock SLAB cache!\n",
2732 					prot->name);
2733 				goto out_free_request_sock_slab_name;
2734 			}
2735 		}
2736 
2737 		if (prot->twsk_prot != NULL) {
2738 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2739 
2740 			if (prot->twsk_prot->twsk_slab_name == NULL)
2741 				goto out_free_request_sock_slab;
2742 
2743 			prot->twsk_prot->twsk_slab =
2744 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2745 						  prot->twsk_prot->twsk_obj_size,
2746 						  0,
2747 						  SLAB_HWCACHE_ALIGN |
2748 							prot->slab_flags,
2749 						  NULL);
2750 			if (prot->twsk_prot->twsk_slab == NULL)
2751 				goto out_free_timewait_sock_slab_name;
2752 		}
2753 	}
2754 
2755 	mutex_lock(&proto_list_mutex);
2756 	list_add(&prot->node, &proto_list);
2757 	assign_proto_idx(prot);
2758 	mutex_unlock(&proto_list_mutex);
2759 	return 0;
2760 
2761 out_free_timewait_sock_slab_name:
2762 	kfree(prot->twsk_prot->twsk_slab_name);
2763 out_free_request_sock_slab:
2764 	if (prot->rsk_prot && prot->rsk_prot->slab) {
2765 		kmem_cache_destroy(prot->rsk_prot->slab);
2766 		prot->rsk_prot->slab = NULL;
2767 	}
2768 out_free_request_sock_slab_name:
2769 	if (prot->rsk_prot)
2770 		kfree(prot->rsk_prot->slab_name);
2771 out_free_sock_slab:
2772 	kmem_cache_destroy(prot->slab);
2773 	prot->slab = NULL;
2774 out:
2775 	return -ENOBUFS;
2776 }
2777 EXPORT_SYMBOL(proto_register);
2778 
2779 void proto_unregister(struct proto *prot)
2780 {
2781 	mutex_lock(&proto_list_mutex);
2782 	release_proto_idx(prot);
2783 	list_del(&prot->node);
2784 	mutex_unlock(&proto_list_mutex);
2785 
2786 	if (prot->slab != NULL) {
2787 		kmem_cache_destroy(prot->slab);
2788 		prot->slab = NULL;
2789 	}
2790 
2791 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2792 		kmem_cache_destroy(prot->rsk_prot->slab);
2793 		kfree(prot->rsk_prot->slab_name);
2794 		prot->rsk_prot->slab = NULL;
2795 	}
2796 
2797 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2798 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2799 		kfree(prot->twsk_prot->twsk_slab_name);
2800 		prot->twsk_prot->twsk_slab = NULL;
2801 	}
2802 }
2803 EXPORT_SYMBOL(proto_unregister);
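
/*
 * Usage sketch (illustrative only): a protocol module pairs these calls in
 * its init/exit paths.  The names my_proto, my_proto_family and PF_MYPROTO
 * are hypothetical.
 *
 *	static int __init my_proto_init(void)
 *	{
 *		int rc = proto_register(&my_proto, 1);
 *
 *		if (rc)
 *			return rc;
 *		rc = sock_register(&my_proto_family);
 *		if (rc)
 *			proto_unregister(&my_proto);
 *		return rc;
 *	}
 *
 *	static void __exit my_proto_exit(void)
 *	{
 *		sock_unregister(PF_MYPROTO);
 *		proto_unregister(&my_proto);
 *	}
 */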
2804 
2805 #ifdef CONFIG_PROC_FS
2806 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2807 	__acquires(proto_list_mutex)
2808 {
2809 	mutex_lock(&proto_list_mutex);
2810 	return seq_list_start_head(&proto_list, *pos);
2811 }
2812 
2813 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2814 {
2815 	return seq_list_next(v, &proto_list, pos);
2816 }
2817 
2818 static void proto_seq_stop(struct seq_file *seq, void *v)
2819 	__releases(proto_list_mutex)
2820 {
2821 	mutex_unlock(&proto_list_mutex);
2822 }
2823 
2824 static char proto_method_implemented(const void *method)
2825 {
2826 	return method == NULL ? 'n' : 'y';
2827 }
2828 static long sock_prot_memory_allocated(struct proto *proto)
2829 {
2830 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2831 }
2832 
2833 static char *sock_prot_memory_pressure(struct proto *proto)
2834 {
2835 	return proto->memory_pressure != NULL ?
2836 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2837 }
2838 
2839 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2840 {
2841 
2842 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2843 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2844 		   proto->name,
2845 		   proto->obj_size,
2846 		   sock_prot_inuse_get(seq_file_net(seq), proto),
2847 		   sock_prot_memory_allocated(proto),
2848 		   sock_prot_memory_pressure(proto),
2849 		   proto->max_header,
2850 		   proto->slab == NULL ? "no" : "yes",
2851 		   module_name(proto->owner),
2852 		   proto_method_implemented(proto->close),
2853 		   proto_method_implemented(proto->connect),
2854 		   proto_method_implemented(proto->disconnect),
2855 		   proto_method_implemented(proto->accept),
2856 		   proto_method_implemented(proto->ioctl),
2857 		   proto_method_implemented(proto->init),
2858 		   proto_method_implemented(proto->destroy),
2859 		   proto_method_implemented(proto->shutdown),
2860 		   proto_method_implemented(proto->setsockopt),
2861 		   proto_method_implemented(proto->getsockopt),
2862 		   proto_method_implemented(proto->sendmsg),
2863 		   proto_method_implemented(proto->recvmsg),
2864 		   proto_method_implemented(proto->sendpage),
2865 		   proto_method_implemented(proto->bind),
2866 		   proto_method_implemented(proto->backlog_rcv),
2867 		   proto_method_implemented(proto->hash),
2868 		   proto_method_implemented(proto->unhash),
2869 		   proto_method_implemented(proto->get_port),
2870 		   proto_method_implemented(proto->enter_memory_pressure));
2871 }
2872 
2873 static int proto_seq_show(struct seq_file *seq, void *v)
2874 {
2875 	if (v == &proto_list)
2876 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2877 			   "protocol",
2878 			   "size",
2879 			   "sockets",
2880 			   "memory",
2881 			   "press",
2882 			   "maxhdr",
2883 			   "slab",
2884 			   "module",
2885 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2886 	else
2887 		proto_seq_printf(seq, list_entry(v, struct proto, node));
2888 	return 0;
2889 }
2890 
2891 static const struct seq_operations proto_seq_ops = {
2892 	.start  = proto_seq_start,
2893 	.next   = proto_seq_next,
2894 	.stop   = proto_seq_stop,
2895 	.show   = proto_seq_show,
2896 };
2897 
2898 static int proto_seq_open(struct inode *inode, struct file *file)
2899 {
2900 	return seq_open_net(inode, file, &proto_seq_ops,
2901 			    sizeof(struct seq_net_private));
2902 }
2903 
2904 static const struct file_operations proto_seq_fops = {
2905 	.owner		= THIS_MODULE,
2906 	.open		= proto_seq_open,
2907 	.read		= seq_read,
2908 	.llseek		= seq_lseek,
2909 	.release	= seq_release_net,
2910 };
2911 
2912 static __net_init int proto_init_net(struct net *net)
2913 {
2914 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2915 		return -ENOMEM;
2916 
2917 	return 0;
2918 }
2919 
2920 static __net_exit void proto_exit_net(struct net *net)
2921 {
2922 	remove_proc_entry("protocols", net->proc_net);
2923 }
2924 
2925 
2926 static __net_initdata struct pernet_operations proto_net_ops = {
2927 	.init = proto_init_net,
2928 	.exit = proto_exit_net,
2929 };
2930 
2931 static int __init proto_init(void)
2932 {
2933 	return register_pernet_subsys(&proto_net_ops);
2934 }
2935 
2936 subsys_initcall(proto_init);
2937 
2938 #endif /* CONFIG_PROC_FS */
2939