xref: /openbmc/linux/net/core/sock.c (revision 8e9356c6)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly,
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93 
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
120 
121 #include <asm/uaccess.h>
122 
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 
135 #include <linux/filter.h>
136 
137 #include <trace/events/sock.h>
138 
139 #ifdef CONFIG_INET
140 #include <net/tcp.h>
141 #endif
142 
143 #include <net/busy_poll.h>
144 
145 static DEFINE_MUTEX(proto_list_mutex);
146 static LIST_HEAD(proto_list);
147 
148 #ifdef CONFIG_MEMCG_KMEM
149 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
150 {
151 	struct proto *proto;
152 	int ret = 0;
153 
154 	mutex_lock(&proto_list_mutex);
155 	list_for_each_entry(proto, &proto_list, node) {
156 		if (proto->init_cgroup) {
157 			ret = proto->init_cgroup(memcg, ss);
158 			if (ret)
159 				goto out;
160 		}
161 	}
162 
163 	mutex_unlock(&proto_list_mutex);
164 	return ret;
165 out:
166 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
167 		if (proto->destroy_cgroup)
168 			proto->destroy_cgroup(memcg);
169 	mutex_unlock(&proto_list_mutex);
170 	return ret;
171 }
172 
173 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
174 {
175 	struct proto *proto;
176 
177 	mutex_lock(&proto_list_mutex);
178 	list_for_each_entry_reverse(proto, &proto_list, node)
179 		if (proto->destroy_cgroup)
180 			proto->destroy_cgroup(memcg);
181 	mutex_unlock(&proto_list_mutex);
182 }
183 #endif
184 
185 /*
186  * Each address family might have different locking rules, so we have
187  * one slock key per address family:
188  */
189 static struct lock_class_key af_family_keys[AF_MAX];
190 static struct lock_class_key af_family_slock_keys[AF_MAX];
191 
192 #if defined(CONFIG_MEMCG_KMEM)
193 struct static_key memcg_socket_limit_enabled;
194 EXPORT_SYMBOL(memcg_socket_limit_enabled);
195 #endif
196 
197 /*
198  * Make lock validator output more readable. (we pre-construct these
199  * strings at build time, so that runtime initialization of socket
200  * locks is fast):
201  */
202 static const char *const af_family_key_strings[AF_MAX+1] = {
203   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
204   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
205   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
206   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
207   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
208   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
209   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
210   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
211   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
212   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
213   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
214   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
215   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
216   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
217 };
218 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
219   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
220   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
221   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
222   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
223   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
224   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
225   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
226   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
227   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
228   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
229   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
230   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
231   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
232   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
233 };
234 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
235   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
236   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
237   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
238   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
239   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
240   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
241   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
242   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
243   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
244   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
245   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
246   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
247   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
248   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
249 };
250 
251 /*
252  * sk_callback_lock locking rules are per-address-family,
253  * so split the lock classes by using a per-AF key:
254  */
255 static struct lock_class_key af_callback_keys[AF_MAX];
256 
257 /* Take into consideration the size of the struct sk_buff overhead in the
258  * determination of these values, since that is non-constant across
259  * platforms.  This makes socket queueing behavior and performance
260  * not depend upon such differences.
261  */
262 #define _SK_MEM_PACKETS		256
263 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
264 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
265 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
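
/*
 * Worked example (illustrative; exact figures depend on the build): on a
 * typical x86_64 configuration SKB_TRUESIZE(256) comes to roughly 832 bytes
 * once the cache-aligned struct sk_buff and struct skb_shared_info overhead
 * is added to the 256 bytes of payload.  With _SK_MEM_PACKETS = 256 that
 * puts SK_WMEM_MAX and SK_RMEM_MAX around 200 KiB, which is why
 * "sysctl net.core.rmem_default" commonly reports a value such as 212992
 * rather than a round power of two.
 */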
266 
267 /* Run time adjustable parameters. */
268 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
269 EXPORT_SYMBOL(sysctl_wmem_max);
270 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
271 EXPORT_SYMBOL(sysctl_rmem_max);
272 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
273 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
274 
275 /* Maximal space eaten by iovec or ancillary data plus some space */
276 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
277 EXPORT_SYMBOL(sysctl_optmem_max);
278 
279 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
280 EXPORT_SYMBOL_GPL(memalloc_socks);
281 
282 /**
283  * sk_set_memalloc - sets %SOCK_MEMALLOC
284  * @sk: socket to set it on
285  *
286  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
287  * It's the responsibility of the admin to adjust min_free_kbytes
288  * to meet the requirements.
289  */
290 void sk_set_memalloc(struct sock *sk)
291 {
292 	sock_set_flag(sk, SOCK_MEMALLOC);
293 	sk->sk_allocation |= __GFP_MEMALLOC;
294 	static_key_slow_inc(&memalloc_socks);
295 }
296 EXPORT_SYMBOL_GPL(sk_set_memalloc);
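
/*
 * Illustrative sketch (not part of the original file): a kernel-side user
 * such as a hypothetical swap-over-network transport would tag its transport
 * socket right after creating it, so that reclaim-critical traffic may dip
 * into the emergency reserves.  Every name below except sk_set_memalloc()
 * and sock_create_kern() is made up for the example.
 *
 *	static int example_open_transport(struct socket **sockp)
 *	{
 *		int err = sock_create_kern(PF_INET, SOCK_STREAM,
 *					   IPPROTO_TCP, sockp);
 *		if (err < 0)
 *			return err;
 *		sk_set_memalloc((*sockp)->sk);
 *		return 0;
 *	}
 */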
297 
298 void sk_clear_memalloc(struct sock *sk)
299 {
300 	sock_reset_flag(sk, SOCK_MEMALLOC);
301 	sk->sk_allocation &= ~__GFP_MEMALLOC;
302 	static_key_slow_dec(&memalloc_socks);
303 
304 	/*
305 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
306 	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
307 	 * it has rmem allocations there is a risk that the user of the
308 	 * socket cannot make forward progress due to exceeding the rmem
309 	 * limits. By rights, sk_clear_memalloc() should only be called
310 	 * on sockets being torn down but warn and reset the accounting if
311 	 * that assumption breaks.
312 	 */
313 	if (WARN_ON(sk->sk_forward_alloc))
314 		sk_mem_reclaim(sk);
315 }
316 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
317 
318 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
319 {
320 	int ret;
321 	unsigned long pflags = current->flags;
322 
323 	/* these should have been dropped before queueing */
324 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
325 
326 	current->flags |= PF_MEMALLOC;
327 	ret = sk->sk_backlog_rcv(sk, skb);
328 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
329 
330 	return ret;
331 }
332 EXPORT_SYMBOL(__sk_backlog_rcv);
333 
334 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
335 {
336 	struct timeval tv;
337 
338 	if (optlen < sizeof(tv))
339 		return -EINVAL;
340 	if (copy_from_user(&tv, optval, sizeof(tv)))
341 		return -EFAULT;
342 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
343 		return -EDOM;
344 
345 	if (tv.tv_sec < 0) {
346 		static int warned __read_mostly;
347 
348 		*timeo_p = 0;
349 		if (warned < 10 && net_ratelimit()) {
350 			warned++;
351 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
352 				__func__, current->comm, task_pid_nr(current));
353 		}
354 		return 0;
355 	}
356 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
357 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
358 		return 0;
359 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
360 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
361 	return 0;
362 }
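
/*
 * User-space sketch (illustrative only): SO_RCVTIMEO and SO_SNDTIMEO hand a
 * struct timeval to sock_set_timeout() above, which rounds it up to whole
 * jiffies; an all-zero timeval means "wait forever" and a negative tv_sec is
 * quietly treated as a zero timeout.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt(SO_RCVTIMEO)");
 *	// a later blocking recv() fails with EAGAIN/EWOULDBLOCK after ~2.5s
 */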
363 
364 static void sock_warn_obsolete_bsdism(const char *name)
365 {
366 	static int warned;
367 	static char warncomm[TASK_COMM_LEN];
368 	if (strcmp(warncomm, current->comm) && warned < 5) {
369 		strcpy(warncomm,  current->comm);
370 		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
371 			warncomm, name);
372 		warned++;
373 	}
374 }
375 
376 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
377 
378 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
379 {
380 	if (sk->sk_flags & flags) {
381 		sk->sk_flags &= ~flags;
382 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
383 			net_disable_timestamp();
384 	}
385 }
386 
387 
388 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
389 {
390 	int err;
391 	int skb_len;
392 	unsigned long flags;
393 	struct sk_buff_head *list = &sk->sk_receive_queue;
394 
395 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
396 		atomic_inc(&sk->sk_drops);
397 		trace_sock_rcvqueue_full(sk, skb);
398 		return -ENOMEM;
399 	}
400 
401 	err = sk_filter(sk, skb);
402 	if (err)
403 		return err;
404 
405 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
406 		atomic_inc(&sk->sk_drops);
407 		return -ENOBUFS;
408 	}
409 
410 	skb->dev = NULL;
411 	skb_set_owner_r(skb, sk);
412 
413 	/* Cache the SKB length before we tack it onto the receive
414 	 * queue.  Once it is added it no longer belongs to us and
415 	 * may be freed by other threads of control pulling packets
416 	 * from the queue.
417 	 */
418 	skb_len = skb->len;
419 
420 	/* We escape from the RCU-protected region here, so make sure we don't
421 	 * leak a non-refcounted dst.
422 	 */
423 	skb_dst_force(skb);
424 
425 	spin_lock_irqsave(&list->lock, flags);
426 	skb->dropcount = atomic_read(&sk->sk_drops);
427 	__skb_queue_tail(list, skb);
428 	spin_unlock_irqrestore(&list->lock, flags);
429 
430 	if (!sock_flag(sk, SOCK_DEAD))
431 		sk->sk_data_ready(sk, skb_len);
432 	return 0;
433 }
434 EXPORT_SYMBOL(sock_queue_rcv_skb);
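
/*
 * Illustrative sketch (assumed protocol, not part of the original file): a
 * simple datagram protocol would typically hand incoming skbs to
 * sock_queue_rcv_skb() from its rcv handler and translate a failure into
 * drop accounting, roughly as below.
 *
 *	static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);	// rcvbuf full, filtered, or no memory
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */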
435 
436 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
437 {
438 	int rc = NET_RX_SUCCESS;
439 
440 	if (sk_filter(sk, skb))
441 		goto discard_and_relse;
442 
443 	skb->dev = NULL;
444 
445 	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
446 		atomic_inc(&sk->sk_drops);
447 		goto discard_and_relse;
448 	}
449 	if (nested)
450 		bh_lock_sock_nested(sk);
451 	else
452 		bh_lock_sock(sk);
453 	if (!sock_owned_by_user(sk)) {
454 		/*
455 		 * trylock + unlock semantics:
456 		 */
457 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
458 
459 		rc = sk_backlog_rcv(sk, skb);
460 
461 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
462 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
463 		bh_unlock_sock(sk);
464 		atomic_inc(&sk->sk_drops);
465 		goto discard_and_relse;
466 	}
467 
468 	bh_unlock_sock(sk);
469 out:
470 	sock_put(sk);
471 	return rc;
472 discard_and_relse:
473 	kfree_skb(skb);
474 	goto out;
475 }
476 EXPORT_SYMBOL(sk_receive_skb);
477 
478 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
479 {
480 	struct dst_entry *dst = __sk_dst_get(sk);
481 
482 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
483 		sk_tx_queue_clear(sk);
484 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
485 		dst_release(dst);
486 		return NULL;
487 	}
488 
489 	return dst;
490 }
491 EXPORT_SYMBOL(__sk_dst_check);
492 
493 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
494 {
495 	struct dst_entry *dst = sk_dst_get(sk);
496 
497 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
498 		sk_dst_reset(sk);
499 		dst_release(dst);
500 		return NULL;
501 	}
502 
503 	return dst;
504 }
505 EXPORT_SYMBOL(sk_dst_check);
506 
507 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
508 				int optlen)
509 {
510 	int ret = -ENOPROTOOPT;
511 #ifdef CONFIG_NETDEVICES
512 	struct net *net = sock_net(sk);
513 	char devname[IFNAMSIZ];
514 	int index;
515 
516 	/* Sorry... */
517 	ret = -EPERM;
518 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
519 		goto out;
520 
521 	ret = -EINVAL;
522 	if (optlen < 0)
523 		goto out;
524 
525 	/* Bind this socket to a particular device like "eth0",
526 	 * as specified in the passed interface name. If the
527 	 * name is "" or the option length is zero the socket
528 	 * is not bound.
529 	 */
530 	if (optlen > IFNAMSIZ - 1)
531 		optlen = IFNAMSIZ - 1;
532 	memset(devname, 0, sizeof(devname));
533 
534 	ret = -EFAULT;
535 	if (copy_from_user(devname, optval, optlen))
536 		goto out;
537 
538 	index = 0;
539 	if (devname[0] != '\0') {
540 		struct net_device *dev;
541 
542 		rcu_read_lock();
543 		dev = dev_get_by_name_rcu(net, devname);
544 		if (dev)
545 			index = dev->ifindex;
546 		rcu_read_unlock();
547 		ret = -ENODEV;
548 		if (!dev)
549 			goto out;
550 	}
551 
552 	lock_sock(sk);
553 	sk->sk_bound_dev_if = index;
554 	sk_dst_reset(sk);
555 	release_sock(sk);
556 
557 	ret = 0;
558 
559 out:
560 #endif
561 
562 	return ret;
563 }
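
/*
 * User-space view (illustrative): binding a socket to "eth0" goes through
 * sock_setbindtodevice() above and requires CAP_NET_RAW; passing an empty
 * string (or a zero option length) removes the binding again.
 *
 *	const char ifname[] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, sizeof(ifname)) < 0)
 *		perror("setsockopt(SO_BINDTODEVICE)");
 */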
564 
565 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
566 				int __user *optlen, int len)
567 {
568 	int ret = -ENOPROTOOPT;
569 #ifdef CONFIG_NETDEVICES
570 	struct net *net = sock_net(sk);
571 	char devname[IFNAMSIZ];
572 
573 	if (sk->sk_bound_dev_if == 0) {
574 		len = 0;
575 		goto zero;
576 	}
577 
578 	ret = -EINVAL;
579 	if (len < IFNAMSIZ)
580 		goto out;
581 
582 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
583 	if (ret)
584 		goto out;
585 
586 	len = strlen(devname) + 1;
587 
588 	ret = -EFAULT;
589 	if (copy_to_user(optval, devname, len))
590 		goto out;
591 
592 zero:
593 	ret = -EFAULT;
594 	if (put_user(len, optlen))
595 		goto out;
596 
597 	ret = 0;
598 
599 out:
600 #endif
601 
602 	return ret;
603 }
604 
605 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
606 {
607 	if (valbool)
608 		sock_set_flag(sk, bit);
609 	else
610 		sock_reset_flag(sk, bit);
611 }
612 
613 /*
614  *	This is meant for all protocols to use and covers goings on
615  *	at the socket level. Everything here is generic.
616  */
617 
618 int sock_setsockopt(struct socket *sock, int level, int optname,
619 		    char __user *optval, unsigned int optlen)
620 {
621 	struct sock *sk = sock->sk;
622 	int val;
623 	int valbool;
624 	struct linger ling;
625 	int ret = 0;
626 
627 	/*
628 	 *	Options without arguments
629 	 */
630 
631 	if (optname == SO_BINDTODEVICE)
632 		return sock_setbindtodevice(sk, optval, optlen);
633 
634 	if (optlen < sizeof(int))
635 		return -EINVAL;
636 
637 	if (get_user(val, (int __user *)optval))
638 		return -EFAULT;
639 
640 	valbool = val ? 1 : 0;
641 
642 	lock_sock(sk);
643 
644 	switch (optname) {
645 	case SO_DEBUG:
646 		if (val && !capable(CAP_NET_ADMIN))
647 			ret = -EACCES;
648 		else
649 			sock_valbool_flag(sk, SOCK_DBG, valbool);
650 		break;
651 	case SO_REUSEADDR:
652 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
653 		break;
654 	case SO_REUSEPORT:
655 		sk->sk_reuseport = valbool;
656 		break;
657 	case SO_TYPE:
658 	case SO_PROTOCOL:
659 	case SO_DOMAIN:
660 	case SO_ERROR:
661 		ret = -ENOPROTOOPT;
662 		break;
663 	case SO_DONTROUTE:
664 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
665 		break;
666 	case SO_BROADCAST:
667 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
668 		break;
669 	case SO_SNDBUF:
670 		/* Don't error on this; BSD doesn't, and if you think
671 		 * about it, this is right. Otherwise apps have to
672 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
673 		 * are treated in BSD as hints.
674 		 */
675 		val = min_t(u32, val, sysctl_wmem_max);
676 set_sndbuf:
677 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
678 		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
679 		/* Wake up sending tasks if we upped the value. */
680 		sk->sk_write_space(sk);
681 		break;
682 
683 	case SO_SNDBUFFORCE:
684 		if (!capable(CAP_NET_ADMIN)) {
685 			ret = -EPERM;
686 			break;
687 		}
688 		goto set_sndbuf;
689 
690 	case SO_RCVBUF:
691 		/* Don't error on this; BSD doesn't, and if you think
692 		 * about it, this is right. Otherwise apps have to
693 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
694 		 * are treated in BSD as hints.
695 		 */
696 		val = min_t(u32, val, sysctl_rmem_max);
697 set_rcvbuf:
698 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
699 		/*
700 		 * We double it on the way in to account for
701 		 * "struct sk_buff" etc. overhead.   Applications
702 		 * assume that the SO_RCVBUF setting they make will
703 		 * allow that much actual data to be received on that
704 		 * socket.
705 		 *
706 		 * Applications are unaware that "struct sk_buff" and
707 		 * other overheads allocate from the receive buffer
708 		 * during socket buffer allocation.
709 		 *
710 		 * And after considering the possible alternatives,
711 		 * returning the value we actually used in getsockopt
712 		 * is the most desirable behavior.
713 		 */
714 		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
715 		break;
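
		/*
		 * Illustrative user-space example of the doubling described
		 * above (values are clamped by net.core.rmem_max):
		 *
		 *	int req = 65536, eff;
		 *	socklen_t len = sizeof(eff);
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
		 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &eff, &len);
		 *	// eff is now roughly 2 * req, i.e. about 131072
		 */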
716 
717 	case SO_RCVBUFFORCE:
718 		if (!capable(CAP_NET_ADMIN)) {
719 			ret = -EPERM;
720 			break;
721 		}
722 		goto set_rcvbuf;
723 
724 	case SO_KEEPALIVE:
725 #ifdef CONFIG_INET
726 		if (sk->sk_protocol == IPPROTO_TCP &&
727 		    sk->sk_type == SOCK_STREAM)
728 			tcp_set_keepalive(sk, valbool);
729 #endif
730 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
731 		break;
732 
733 	case SO_OOBINLINE:
734 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
735 		break;
736 
737 	case SO_NO_CHECK:
738 		sk->sk_no_check = valbool;
739 		break;
740 
741 	case SO_PRIORITY:
742 		if ((val >= 0 && val <= 6) ||
743 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
744 			sk->sk_priority = val;
745 		else
746 			ret = -EPERM;
747 		break;
748 
749 	case SO_LINGER:
750 		if (optlen < sizeof(ling)) {
751 			ret = -EINVAL;	/* 1003.1g */
752 			break;
753 		}
754 		if (copy_from_user(&ling, optval, sizeof(ling))) {
755 			ret = -EFAULT;
756 			break;
757 		}
758 		if (!ling.l_onoff)
759 			sock_reset_flag(sk, SOCK_LINGER);
760 		else {
761 #if (BITS_PER_LONG == 32)
762 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
763 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
764 			else
765 #endif
766 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
767 			sock_set_flag(sk, SOCK_LINGER);
768 		}
769 		break;
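
		/*
		 * User-space sketch (illustrative): enabling a five second
		 * linger, which the code above converts to jiffies and
		 * records in sk_lingertime.
		 *
		 *	struct linger lg = { .l_onoff = 1, .l_linger = 5 };
		 *
		 *	if (setsockopt(fd, SOL_SOCKET, SO_LINGER,
		 *		       &lg, sizeof(lg)) < 0)
		 *		perror("setsockopt(SO_LINGER)");
		 */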
770 
771 	case SO_BSDCOMPAT:
772 		sock_warn_obsolete_bsdism("setsockopt");
773 		break;
774 
775 	case SO_PASSCRED:
776 		if (valbool)
777 			set_bit(SOCK_PASSCRED, &sock->flags);
778 		else
779 			clear_bit(SOCK_PASSCRED, &sock->flags);
780 		break;
781 
782 	case SO_TIMESTAMP:
783 	case SO_TIMESTAMPNS:
784 		if (valbool)  {
785 			if (optname == SO_TIMESTAMP)
786 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
787 			else
788 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
789 			sock_set_flag(sk, SOCK_RCVTSTAMP);
790 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
791 		} else {
792 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
793 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
794 		}
795 		break;
796 
797 	case SO_TIMESTAMPING:
798 		if (val & ~SOF_TIMESTAMPING_MASK) {
799 			ret = -EINVAL;
800 			break;
801 		}
802 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
803 				  val & SOF_TIMESTAMPING_TX_HARDWARE);
804 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
805 				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
806 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
807 				  val & SOF_TIMESTAMPING_RX_HARDWARE);
808 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
809 			sock_enable_timestamp(sk,
810 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
811 		else
812 			sock_disable_timestamp(sk,
813 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
814 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
815 				  val & SOF_TIMESTAMPING_SOFTWARE);
816 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
817 				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
818 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
819 				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
820 		break;
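
		/*
		 * User-space sketch (illustrative): requesting software
		 * transmit and receive timestamps; the flags map one to one
		 * onto the sock_valbool_flag() calls above.
		 *
		 *	int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
		 *		    SOF_TIMESTAMPING_RX_SOFTWARE |
		 *		    SOF_TIMESTAMPING_SOFTWARE;
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		 *		   &flags, sizeof(flags));
		 */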
821 
822 	case SO_RCVLOWAT:
823 		if (val < 0)
824 			val = INT_MAX;
825 		sk->sk_rcvlowat = val ? : 1;
826 		break;
827 
828 	case SO_RCVTIMEO:
829 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
830 		break;
831 
832 	case SO_SNDTIMEO:
833 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
834 		break;
835 
836 	case SO_ATTACH_FILTER:
837 		ret = -EINVAL;
838 		if (optlen == sizeof(struct sock_fprog)) {
839 			struct sock_fprog fprog;
840 
841 			ret = -EFAULT;
842 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
843 				break;
844 
845 			ret = sk_attach_filter(&fprog, sk);
846 		}
847 		break;
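
		/*
		 * User-space sketch (illustrative): attaching a trivial
		 * classic BPF program that accepts every packet.  Real
		 * filters are usually generated with a tool such as
		 * "tcpdump -dd".
		 *
		 *	struct sock_filter code[] = {
		 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
		 *	};
		 *	struct sock_fprog prog = {
		 *		.len	= 1,
		 *		.filter	= code,
		 *	};
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		 *		   &prog, sizeof(prog));
		 */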
848 
849 	case SO_DETACH_FILTER:
850 		ret = sk_detach_filter(sk);
851 		break;
852 
853 	case SO_LOCK_FILTER:
854 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
855 			ret = -EPERM;
856 		else
857 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
858 		break;
859 
860 	case SO_PASSSEC:
861 		if (valbool)
862 			set_bit(SOCK_PASSSEC, &sock->flags);
863 		else
864 			clear_bit(SOCK_PASSSEC, &sock->flags);
865 		break;
866 	case SO_MARK:
867 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
868 			ret = -EPERM;
869 		else
870 			sk->sk_mark = val;
871 		break;
872 
873 		/* We implement SO_SNDLOWAT etc. as
874 		   not settable (1003.1g 5.3) */
875 	case SO_RXQ_OVFL:
876 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
877 		break;
878 
879 	case SO_WIFI_STATUS:
880 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
881 		break;
882 
883 	case SO_PEEK_OFF:
884 		if (sock->ops->set_peek_off)
885 			ret = sock->ops->set_peek_off(sk, val);
886 		else
887 			ret = -EOPNOTSUPP;
888 		break;
889 
890 	case SO_NOFCS:
891 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
892 		break;
893 
894 	case SO_SELECT_ERR_QUEUE:
895 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
896 		break;
897 
898 #ifdef CONFIG_NET_RX_BUSY_POLL
899 	case SO_BUSY_POLL:
900 		/* allow unprivileged users to decrease the value */
901 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
902 			ret = -EPERM;
903 		else {
904 			if (val < 0)
905 				ret = -EINVAL;
906 			else
907 				sk->sk_ll_usec = val;
908 		}
909 		break;
910 #endif
911 
912 	case SO_MAX_PACING_RATE:
913 		sk->sk_max_pacing_rate = val;
914 		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
915 					 sk->sk_max_pacing_rate);
916 		break;
917 
918 	default:
919 		ret = -ENOPROTOOPT;
920 		break;
921 	}
922 	release_sock(sk);
923 	return ret;
924 }
925 EXPORT_SYMBOL(sock_setsockopt);
926 
927 
928 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
929 			  struct ucred *ucred)
930 {
931 	ucred->pid = pid_vnr(pid);
932 	ucred->uid = ucred->gid = -1;
933 	if (cred) {
934 		struct user_namespace *current_ns = current_user_ns();
935 
936 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
937 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
938 	}
939 }
940 
941 int sock_getsockopt(struct socket *sock, int level, int optname,
942 		    char __user *optval, int __user *optlen)
943 {
944 	struct sock *sk = sock->sk;
945 
946 	union {
947 		int val;
948 		struct linger ling;
949 		struct timeval tm;
950 	} v;
951 
952 	int lv = sizeof(int);
953 	int len;
954 
955 	if (get_user(len, optlen))
956 		return -EFAULT;
957 	if (len < 0)
958 		return -EINVAL;
959 
960 	memset(&v, 0, sizeof(v));
961 
962 	switch (optname) {
963 	case SO_DEBUG:
964 		v.val = sock_flag(sk, SOCK_DBG);
965 		break;
966 
967 	case SO_DONTROUTE:
968 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
969 		break;
970 
971 	case SO_BROADCAST:
972 		v.val = sock_flag(sk, SOCK_BROADCAST);
973 		break;
974 
975 	case SO_SNDBUF:
976 		v.val = sk->sk_sndbuf;
977 		break;
978 
979 	case SO_RCVBUF:
980 		v.val = sk->sk_rcvbuf;
981 		break;
982 
983 	case SO_REUSEADDR:
984 		v.val = sk->sk_reuse;
985 		break;
986 
987 	case SO_REUSEPORT:
988 		v.val = sk->sk_reuseport;
989 		break;
990 
991 	case SO_KEEPALIVE:
992 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
993 		break;
994 
995 	case SO_TYPE:
996 		v.val = sk->sk_type;
997 		break;
998 
999 	case SO_PROTOCOL:
1000 		v.val = sk->sk_protocol;
1001 		break;
1002 
1003 	case SO_DOMAIN:
1004 		v.val = sk->sk_family;
1005 		break;
1006 
1007 	case SO_ERROR:
1008 		v.val = -sock_error(sk);
1009 		if (v.val == 0)
1010 			v.val = xchg(&sk->sk_err_soft, 0);
1011 		break;
1012 
1013 	case SO_OOBINLINE:
1014 		v.val = sock_flag(sk, SOCK_URGINLINE);
1015 		break;
1016 
1017 	case SO_NO_CHECK:
1018 		v.val = sk->sk_no_check;
1019 		break;
1020 
1021 	case SO_PRIORITY:
1022 		v.val = sk->sk_priority;
1023 		break;
1024 
1025 	case SO_LINGER:
1026 		lv		= sizeof(v.ling);
1027 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1028 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1029 		break;
1030 
1031 	case SO_BSDCOMPAT:
1032 		sock_warn_obsolete_bsdism("getsockopt");
1033 		break;
1034 
1035 	case SO_TIMESTAMP:
1036 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1037 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1038 		break;
1039 
1040 	case SO_TIMESTAMPNS:
1041 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1042 		break;
1043 
1044 	case SO_TIMESTAMPING:
1045 		v.val = 0;
1046 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1047 			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1048 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1049 			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1050 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1051 			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1052 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1053 			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1054 		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1055 			v.val |= SOF_TIMESTAMPING_SOFTWARE;
1056 		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1057 			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1058 		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1059 			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1060 		break;
1061 
1062 	case SO_RCVTIMEO:
1063 		lv = sizeof(struct timeval);
1064 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1065 			v.tm.tv_sec = 0;
1066 			v.tm.tv_usec = 0;
1067 		} else {
1068 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1069 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1070 		}
1071 		break;
1072 
1073 	case SO_SNDTIMEO:
1074 		lv = sizeof(struct timeval);
1075 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1076 			v.tm.tv_sec = 0;
1077 			v.tm.tv_usec = 0;
1078 		} else {
1079 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1080 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1081 		}
1082 		break;
1083 
1084 	case SO_RCVLOWAT:
1085 		v.val = sk->sk_rcvlowat;
1086 		break;
1087 
1088 	case SO_SNDLOWAT:
1089 		v.val = 1;
1090 		break;
1091 
1092 	case SO_PASSCRED:
1093 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1094 		break;
1095 
1096 	case SO_PEERCRED:
1097 	{
1098 		struct ucred peercred;
1099 		if (len > sizeof(peercred))
1100 			len = sizeof(peercred);
1101 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1102 		if (copy_to_user(optval, &peercred, len))
1103 			return -EFAULT;
1104 		goto lenout;
1105 	}
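
	/*
	 * User-space sketch (illustrative): reading the peer credentials of
	 * a connected AF_UNIX socket, which are filled in by cred_to_ucred()
	 * above.
	 *
	 *	struct ucred peer;
	 *	socklen_t len = sizeof(peer);
	 *
	 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
	 *		printf("pid=%ld uid=%u gid=%u\n",
	 *		       (long)peer.pid, peer.uid, peer.gid);
	 */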
1106 
1107 	case SO_PEERNAME:
1108 	{
1109 		char address[128];
1110 
1111 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1112 			return -ENOTCONN;
1113 		if (lv < len)
1114 			return -EINVAL;
1115 		if (copy_to_user(optval, address, len))
1116 			return -EFAULT;
1117 		goto lenout;
1118 	}
1119 
1120 	/* Dubious BSD thing... Probably nobody even uses it, but
1121 	 * the UNIX standard wants it for whatever reason... -DaveM
1122 	 */
1123 	case SO_ACCEPTCONN:
1124 		v.val = sk->sk_state == TCP_LISTEN;
1125 		break;
1126 
1127 	case SO_PASSSEC:
1128 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1129 		break;
1130 
1131 	case SO_PEERSEC:
1132 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1133 
1134 	case SO_MARK:
1135 		v.val = sk->sk_mark;
1136 		break;
1137 
1138 	case SO_RXQ_OVFL:
1139 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1140 		break;
1141 
1142 	case SO_WIFI_STATUS:
1143 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1144 		break;
1145 
1146 	case SO_PEEK_OFF:
1147 		if (!sock->ops->set_peek_off)
1148 			return -EOPNOTSUPP;
1149 
1150 		v.val = sk->sk_peek_off;
1151 		break;
1152 	case SO_NOFCS:
1153 		v.val = sock_flag(sk, SOCK_NOFCS);
1154 		break;
1155 
1156 	case SO_BINDTODEVICE:
1157 		return sock_getbindtodevice(sk, optval, optlen, len);
1158 
1159 	case SO_GET_FILTER:
1160 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1161 		if (len < 0)
1162 			return len;
1163 
1164 		goto lenout;
1165 
1166 	case SO_LOCK_FILTER:
1167 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1168 		break;
1169 
1170 	case SO_BPF_EXTENSIONS:
1171 		v.val = bpf_tell_extensions();
1172 		break;
1173 
1174 	case SO_SELECT_ERR_QUEUE:
1175 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1176 		break;
1177 
1178 #ifdef CONFIG_NET_RX_BUSY_POLL
1179 	case SO_BUSY_POLL:
1180 		v.val = sk->sk_ll_usec;
1181 		break;
1182 #endif
1183 
1184 	case SO_MAX_PACING_RATE:
1185 		v.val = sk->sk_max_pacing_rate;
1186 		break;
1187 
1188 	default:
1189 		return -ENOPROTOOPT;
1190 	}
1191 
1192 	if (len > lv)
1193 		len = lv;
1194 	if (copy_to_user(optval, &v, len))
1195 		return -EFAULT;
1196 lenout:
1197 	if (put_user(len, optlen))
1198 		return -EFAULT;
1199 	return 0;
1200 }
1201 
1202 /*
1203  * Initialize an sk_lock.
1204  *
1205  * (We also register the sk_lock with the lock validator.)
1206  */
1207 static inline void sock_lock_init(struct sock *sk)
1208 {
1209 	sock_lock_init_class_and_name(sk,
1210 			af_family_slock_key_strings[sk->sk_family],
1211 			af_family_slock_keys + sk->sk_family,
1212 			af_family_key_strings[sk->sk_family],
1213 			af_family_keys + sk->sk_family);
1214 }
1215 
1216 /*
1217  * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
1218  * even temporarily, because of RCU lookups. sk_node should also be left as is.
1219  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1220  */
1221 static void sock_copy(struct sock *nsk, const struct sock *osk)
1222 {
1223 #ifdef CONFIG_SECURITY_NETWORK
1224 	void *sptr = nsk->sk_security;
1225 #endif
1226 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1227 
1228 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1229 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1230 
1231 #ifdef CONFIG_SECURITY_NETWORK
1232 	nsk->sk_security = sptr;
1233 	security_sk_clone(osk, nsk);
1234 #endif
1235 }
1236 
1237 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1238 {
1239 	unsigned long nulls1, nulls2;
1240 
1241 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1242 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1243 	if (nulls1 > nulls2)
1244 		swap(nulls1, nulls2);
1245 
1246 	if (nulls1 != 0)
1247 		memset((char *)sk, 0, nulls1);
1248 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1249 	       nulls2 - nulls1 - sizeof(void *));
1250 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1251 	       size - nulls2 - sizeof(void *));
1252 }
1253 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1254 
1255 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1256 		int family)
1257 {
1258 	struct sock *sk;
1259 	struct kmem_cache *slab;
1260 
1261 	slab = prot->slab;
1262 	if (slab != NULL) {
1263 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1264 		if (!sk)
1265 			return sk;
1266 		if (priority & __GFP_ZERO) {
1267 			if (prot->clear_sk)
1268 				prot->clear_sk(sk, prot->obj_size);
1269 			else
1270 				sk_prot_clear_nulls(sk, prot->obj_size);
1271 		}
1272 	} else
1273 		sk = kmalloc(prot->obj_size, priority);
1274 
1275 	if (sk != NULL) {
1276 		kmemcheck_annotate_bitfield(sk, flags);
1277 
1278 		if (security_sk_alloc(sk, family, priority))
1279 			goto out_free;
1280 
1281 		if (!try_module_get(prot->owner))
1282 			goto out_free_sec;
1283 		sk_tx_queue_clear(sk);
1284 	}
1285 
1286 	return sk;
1287 
1288 out_free_sec:
1289 	security_sk_free(sk);
1290 out_free:
1291 	if (slab != NULL)
1292 		kmem_cache_free(slab, sk);
1293 	else
1294 		kfree(sk);
1295 	return NULL;
1296 }
1297 
1298 static void sk_prot_free(struct proto *prot, struct sock *sk)
1299 {
1300 	struct kmem_cache *slab;
1301 	struct module *owner;
1302 
1303 	owner = prot->owner;
1304 	slab = prot->slab;
1305 
1306 	security_sk_free(sk);
1307 	if (slab != NULL)
1308 		kmem_cache_free(slab, sk);
1309 	else
1310 		kfree(sk);
1311 	module_put(owner);
1312 }
1313 
1314 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1315 void sock_update_netprioidx(struct sock *sk)
1316 {
1317 	if (in_interrupt())
1318 		return;
1319 
1320 	sk->sk_cgrp_prioidx = task_netprioidx(current);
1321 }
1322 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1323 #endif
1324 
1325 /**
1326  *	sk_alloc - All socket objects are allocated here
1327  *	@net: the applicable net namespace
1328  *	@family: protocol family
1329  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1330  *	@prot: struct proto associated with this new sock instance
1331  */
1332 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1333 		      struct proto *prot)
1334 {
1335 	struct sock *sk;
1336 
1337 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1338 	if (sk) {
1339 		sk->sk_family = family;
1340 		/*
1341 		 * See comment in struct sock definition to understand
1342 		 * why we need sk_prot_creator -acme
1343 		 */
1344 		sk->sk_prot = sk->sk_prot_creator = prot;
1345 		sock_lock_init(sk);
1346 		sock_net_set(sk, get_net(net));
1347 		atomic_set(&sk->sk_wmem_alloc, 1);
1348 
1349 		sock_update_classid(sk);
1350 		sock_update_netprioidx(sk);
1351 	}
1352 
1353 	return sk;
1354 }
1355 EXPORT_SYMBOL(sk_alloc);
1356 
1357 static void __sk_free(struct sock *sk)
1358 {
1359 	struct sk_filter *filter;
1360 
1361 	if (sk->sk_destruct)
1362 		sk->sk_destruct(sk);
1363 
1364 	filter = rcu_dereference_check(sk->sk_filter,
1365 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1366 	if (filter) {
1367 		sk_filter_uncharge(sk, filter);
1368 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1369 	}
1370 
1371 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1372 
1373 	if (atomic_read(&sk->sk_omem_alloc))
1374 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1375 			 __func__, atomic_read(&sk->sk_omem_alloc));
1376 
1377 	if (sk->sk_peer_cred)
1378 		put_cred(sk->sk_peer_cred);
1379 	put_pid(sk->sk_peer_pid);
1380 	put_net(sock_net(sk));
1381 	sk_prot_free(sk->sk_prot_creator, sk);
1382 }
1383 
1384 void sk_free(struct sock *sk)
1385 {
1386 	/*
1387 	 * We subtract one from sk_wmem_alloc and can know if
1388 	 * some packets are still in some tx queue.
1389 	 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
1390 	 */
1391 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1392 		__sk_free(sk);
1393 }
1394 EXPORT_SYMBOL(sk_free);
1395 
1396 /*
1397  * The last sock_put should drop the reference to sk->sk_net. It has already
1398  * been dropped in sk_change_net. Taking a reference to the stopping namespace
1399  * is not an option.
1400  * Take a reference to the socket to remove it from the hash while it is still
1401  * _alive_, and after that destroy it in the context of init_net.
1402  */
1403 void sk_release_kernel(struct sock *sk)
1404 {
1405 	if (sk == NULL || sk->sk_socket == NULL)
1406 		return;
1407 
1408 	sock_hold(sk);
1409 	sock_release(sk->sk_socket);
1410 	release_net(sock_net(sk));
1411 	sock_net_set(sk, get_net(&init_net));
1412 	sock_put(sk);
1413 }
1414 EXPORT_SYMBOL(sk_release_kernel);
1415 
1416 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1417 {
1418 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1419 		sock_update_memcg(newsk);
1420 }
1421 
1422 /**
1423  *	sk_clone_lock - clone a socket, and lock its clone
1424  *	@sk: the socket to clone
1425  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1426  *
1427  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1428  */
1429 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1430 {
1431 	struct sock *newsk;
1432 
1433 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1434 	if (newsk != NULL) {
1435 		struct sk_filter *filter;
1436 
1437 		sock_copy(newsk, sk);
1438 
1439 		/* SANITY */
1440 		get_net(sock_net(newsk));
1441 		sk_node_init(&newsk->sk_node);
1442 		sock_lock_init(newsk);
1443 		bh_lock_sock(newsk);
1444 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1445 		newsk->sk_backlog.len = 0;
1446 
1447 		atomic_set(&newsk->sk_rmem_alloc, 0);
1448 		/*
1449 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1450 		 */
1451 		atomic_set(&newsk->sk_wmem_alloc, 1);
1452 		atomic_set(&newsk->sk_omem_alloc, 0);
1453 		skb_queue_head_init(&newsk->sk_receive_queue);
1454 		skb_queue_head_init(&newsk->sk_write_queue);
1455 #ifdef CONFIG_NET_DMA
1456 		skb_queue_head_init(&newsk->sk_async_wait_queue);
1457 #endif
1458 
1459 		spin_lock_init(&newsk->sk_dst_lock);
1460 		rwlock_init(&newsk->sk_callback_lock);
1461 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1462 				af_callback_keys + newsk->sk_family,
1463 				af_family_clock_key_strings[newsk->sk_family]);
1464 
1465 		newsk->sk_dst_cache	= NULL;
1466 		newsk->sk_wmem_queued	= 0;
1467 		newsk->sk_forward_alloc = 0;
1468 		newsk->sk_send_head	= NULL;
1469 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1470 
1471 		sock_reset_flag(newsk, SOCK_DONE);
1472 		skb_queue_head_init(&newsk->sk_error_queue);
1473 
1474 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1475 		if (filter != NULL)
1476 			sk_filter_charge(newsk, filter);
1477 
1478 		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1479 			/* It is still a raw copy of the parent, so invalidate
1480 			 * the destructor and do a plain sk_free() */
1481 			newsk->sk_destruct = NULL;
1482 			bh_unlock_sock(newsk);
1483 			sk_free(newsk);
1484 			newsk = NULL;
1485 			goto out;
1486 		}
1487 
1488 		newsk->sk_err	   = 0;
1489 		newsk->sk_priority = 0;
1490 		/*
1491 		 * Before updating sk_refcnt, we must commit prior changes to memory
1492 		 * (Documentation/RCU/rculist_nulls.txt for details)
1493 		 */
1494 		smp_wmb();
1495 		atomic_set(&newsk->sk_refcnt, 2);
1496 
1497 		/*
1498 		 * Increment the counter in the same struct proto as the master
1499 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1500 		 * is the same as sk->sk_prot->socks, as this field was copied
1501 		 * with memcpy).
1502 		 *
1503 		 * This _changes_ the previous behaviour, where
1504 		 * tcp_create_openreq_child always was incrementing the
1505 		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1506 		 * to be taken into account in all callers. -acme
1507 		 */
1508 		sk_refcnt_debug_inc(newsk);
1509 		sk_set_socket(newsk, NULL);
1510 		newsk->sk_wq = NULL;
1511 
1512 		sk_update_clone(sk, newsk);
1513 
1514 		if (newsk->sk_prot->sockets_allocated)
1515 			sk_sockets_allocated_inc(newsk);
1516 
1517 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1518 			net_enable_timestamp();
1519 	}
1520 out:
1521 	return newsk;
1522 }
1523 EXPORT_SYMBOL_GPL(sk_clone_lock);
1524 
1525 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1526 {
1527 	__sk_dst_set(sk, dst);
1528 	sk->sk_route_caps = dst->dev->features;
1529 	if (sk->sk_route_caps & NETIF_F_GSO)
1530 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1531 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1532 	if (sk_can_gso(sk)) {
1533 		if (dst->header_len) {
1534 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1535 		} else {
1536 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1537 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1538 			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1539 		}
1540 	}
1541 }
1542 EXPORT_SYMBOL_GPL(sk_setup_caps);
1543 
1544 /*
1545  *	Simple resource managers for sockets.
1546  */
1547 
1548 
1549 /*
1550  * Write buffer destructor automatically called from kfree_skb.
1551  */
1552 void sock_wfree(struct sk_buff *skb)
1553 {
1554 	struct sock *sk = skb->sk;
1555 	unsigned int len = skb->truesize;
1556 
1557 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1558 		/*
1559 		 * Keep a reference on sk_wmem_alloc, this will be released
1560 		 * after sk_write_space() call
1561 		 */
1562 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1563 		sk->sk_write_space(sk);
1564 		len = 1;
1565 	}
1566 	/*
1567 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1568 	 * could not do because of in-flight packets
1569 	 */
1570 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1571 		__sk_free(sk);
1572 }
1573 EXPORT_SYMBOL(sock_wfree);
1574 
1575 void skb_orphan_partial(struct sk_buff *skb)
1576 {
1577 	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1578 	 * so we do not completely orphan the skb, but transfer all
1579 	 * accounted bytes but one, to avoid unexpected reorders.
1580 	 */
1581 	if (skb->destructor == sock_wfree
1582 #ifdef CONFIG_INET
1583 	    || skb->destructor == tcp_wfree
1584 #endif
1585 		) {
1586 		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1587 		skb->truesize = 1;
1588 	} else {
1589 		skb_orphan(skb);
1590 	}
1591 }
1592 EXPORT_SYMBOL(skb_orphan_partial);
1593 
1594 /*
1595  * Read buffer destructor automatically called from kfree_skb.
1596  */
1597 void sock_rfree(struct sk_buff *skb)
1598 {
1599 	struct sock *sk = skb->sk;
1600 	unsigned int len = skb->truesize;
1601 
1602 	atomic_sub(len, &sk->sk_rmem_alloc);
1603 	sk_mem_uncharge(sk, len);
1604 }
1605 EXPORT_SYMBOL(sock_rfree);
1606 
1607 void sock_edemux(struct sk_buff *skb)
1608 {
1609 	struct sock *sk = skb->sk;
1610 
1611 #ifdef CONFIG_INET
1612 	if (sk->sk_state == TCP_TIME_WAIT)
1613 		inet_twsk_put(inet_twsk(sk));
1614 	else
1615 #endif
1616 		sock_put(sk);
1617 }
1618 EXPORT_SYMBOL(sock_edemux);
1619 
1620 kuid_t sock_i_uid(struct sock *sk)
1621 {
1622 	kuid_t uid;
1623 
1624 	read_lock_bh(&sk->sk_callback_lock);
1625 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1626 	read_unlock_bh(&sk->sk_callback_lock);
1627 	return uid;
1628 }
1629 EXPORT_SYMBOL(sock_i_uid);
1630 
1631 unsigned long sock_i_ino(struct sock *sk)
1632 {
1633 	unsigned long ino;
1634 
1635 	read_lock_bh(&sk->sk_callback_lock);
1636 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1637 	read_unlock_bh(&sk->sk_callback_lock);
1638 	return ino;
1639 }
1640 EXPORT_SYMBOL(sock_i_ino);
1641 
1642 /*
1643  * Allocate a skb from the socket's send buffer.
1644  */
1645 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1646 			     gfp_t priority)
1647 {
1648 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1649 		struct sk_buff *skb = alloc_skb(size, priority);
1650 		if (skb) {
1651 			skb_set_owner_w(skb, sk);
1652 			return skb;
1653 		}
1654 	}
1655 	return NULL;
1656 }
1657 EXPORT_SYMBOL(sock_wmalloc);
1658 
1659 /*
1660  * Allocate a memory block from the socket's option memory buffer.
1661  */
1662 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1663 {
1664 	if ((unsigned int)size <= sysctl_optmem_max &&
1665 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1666 		void *mem;
1667 		/* First do the add, to avoid the race if kmalloc
1668 		 * might sleep.
1669 		 */
1670 		atomic_add(size, &sk->sk_omem_alloc);
1671 		mem = kmalloc(size, priority);
1672 		if (mem)
1673 			return mem;
1674 		atomic_sub(size, &sk->sk_omem_alloc);
1675 	}
1676 	return NULL;
1677 }
1678 EXPORT_SYMBOL(sock_kmalloc);
1679 
1680 /*
1681  * Free an option memory block.
1682  */
1683 void sock_kfree_s(struct sock *sk, void *mem, int size)
1684 {
1685 	kfree(mem);
1686 	atomic_sub(size, &sk->sk_omem_alloc);
1687 }
1688 EXPORT_SYMBOL(sock_kfree_s);
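
/*
 * Typical usage sketch (illustrative; the option structure is assumed):
 * option memory is charged against sk_omem_alloc on allocation and must be
 * released with the same size that was requested.
 *
 *	struct example_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */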
1689 
1690 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1691    I think these locks should be removed for datagram sockets.
1692  */
1693 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1694 {
1695 	DEFINE_WAIT(wait);
1696 
1697 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1698 	for (;;) {
1699 		if (!timeo)
1700 			break;
1701 		if (signal_pending(current))
1702 			break;
1703 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1704 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1705 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1706 			break;
1707 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1708 			break;
1709 		if (sk->sk_err)
1710 			break;
1711 		timeo = schedule_timeout(timeo);
1712 	}
1713 	finish_wait(sk_sleep(sk), &wait);
1714 	return timeo;
1715 }
1716 
1717 
1718 /*
1719  *	Generic send/receive buffer handlers
1720  */
1721 
1722 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1723 				     unsigned long data_len, int noblock,
1724 				     int *errcode, int max_page_order)
1725 {
1726 	struct sk_buff *skb = NULL;
1727 	unsigned long chunk;
1728 	gfp_t gfp_mask;
1729 	long timeo;
1730 	int err;
1731 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1732 	struct page *page;
1733 	int i;
1734 
1735 	err = -EMSGSIZE;
1736 	if (npages > MAX_SKB_FRAGS)
1737 		goto failure;
1738 
1739 	timeo = sock_sndtimeo(sk, noblock);
1740 	while (!skb) {
1741 		err = sock_error(sk);
1742 		if (err != 0)
1743 			goto failure;
1744 
1745 		err = -EPIPE;
1746 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1747 			goto failure;
1748 
1749 		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
1750 			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1751 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1752 			err = -EAGAIN;
1753 			if (!timeo)
1754 				goto failure;
1755 			if (signal_pending(current))
1756 				goto interrupted;
1757 			timeo = sock_wait_for_wmem(sk, timeo);
1758 			continue;
1759 		}
1760 
1761 		err = -ENOBUFS;
1762 		gfp_mask = sk->sk_allocation;
1763 		if (gfp_mask & __GFP_WAIT)
1764 			gfp_mask |= __GFP_REPEAT;
1765 
1766 		skb = alloc_skb(header_len, gfp_mask);
1767 		if (!skb)
1768 			goto failure;
1769 
1770 		skb->truesize += data_len;
1771 
1772 		for (i = 0; npages > 0; i++) {
1773 			int order = max_page_order;
1774 
1775 			while (order) {
1776 				if (npages >= 1 << order) {
1777 					page = alloc_pages(sk->sk_allocation |
1778 							   __GFP_COMP |
1779 							   __GFP_NOWARN |
1780 							   __GFP_NORETRY,
1781 							   order);
1782 					if (page)
1783 						goto fill_page;
1784 				}
1785 				order--;
1786 			}
1787 			page = alloc_page(sk->sk_allocation);
1788 			if (!page)
1789 				goto failure;
1790 fill_page:
1791 			chunk = min_t(unsigned long, data_len,
1792 				      PAGE_SIZE << order);
1793 			skb_fill_page_desc(skb, i, page, 0, chunk);
1794 			data_len -= chunk;
1795 			npages -= 1 << order;
1796 		}
1797 	}
1798 
1799 	skb_set_owner_w(skb, sk);
1800 	return skb;
1801 
1802 interrupted:
1803 	err = sock_intr_errno(timeo);
1804 failure:
1805 	kfree_skb(skb);
1806 	*errcode = err;
1807 	return NULL;
1808 }
1809 EXPORT_SYMBOL(sock_alloc_send_pskb);
1810 
1811 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1812 				    int noblock, int *errcode)
1813 {
1814 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1815 }
1816 EXPORT_SYMBOL(sock_alloc_send_skb);
1817 
1818 /* On 32bit arches, an skb frag is limited to 2^15 */
1819 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1820 
1821 /**
1822  * skb_page_frag_refill - check that a page_frag contains enough room
1823  * @sz: minimum size of the fragment we want to get
1824  * @pfrag: pointer to page_frag
1825  * @prio: priority for memory allocation
1826  *
1827  * Note: While this allocator tries to use high-order pages, there is
1828  * no guarantee that allocations succeed. Therefore, @sz MUST be
1829  * less than or equal to PAGE_SIZE.
1830  */
1831 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1832 {
1833 	int order;
1834 
1835 	if (pfrag->page) {
1836 		if (atomic_read(&pfrag->page->_count) == 1) {
1837 			pfrag->offset = 0;
1838 			return true;
1839 		}
1840 		if (pfrag->offset + sz <= pfrag->size)
1841 			return true;
1842 		put_page(pfrag->page);
1843 	}
1844 
1845 	order = SKB_FRAG_PAGE_ORDER;
1846 	do {
1847 		gfp_t gfp = prio;
1848 
1849 		if (order)
1850 			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1851 		pfrag->page = alloc_pages(gfp, order);
1852 		if (likely(pfrag->page)) {
1853 			pfrag->offset = 0;
1854 			pfrag->size = PAGE_SIZE << order;
1855 			return true;
1856 		}
1857 	} while (--order >= 0);
1858 
1859 	return false;
1860 }
1861 EXPORT_SYMBOL(skb_page_frag_refill);
1862 
1863 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1864 {
1865 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1866 		return true;
1867 
1868 	sk_enter_memory_pressure(sk);
1869 	sk_stream_moderate_sndbuf(sk);
1870 	return false;
1871 }
1872 EXPORT_SYMBOL(sk_page_frag_refill);
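
/*
 * Usage sketch (illustrative, loosely modelled on how the TCP sendmsg path
 * uses the per-socket page fragment): make sure the fragment has room, copy
 * into it, then advance the offset by the amount consumed.  "data" and
 * "copy" are assumed to come from the caller.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -ENOMEM;	// under memory pressure, try again later
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;
 */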
1873 
1874 static void __lock_sock(struct sock *sk)
1875 	__releases(&sk->sk_lock.slock)
1876 	__acquires(&sk->sk_lock.slock)
1877 {
1878 	DEFINE_WAIT(wait);
1879 
1880 	for (;;) {
1881 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1882 					TASK_UNINTERRUPTIBLE);
1883 		spin_unlock_bh(&sk->sk_lock.slock);
1884 		schedule();
1885 		spin_lock_bh(&sk->sk_lock.slock);
1886 		if (!sock_owned_by_user(sk))
1887 			break;
1888 	}
1889 	finish_wait(&sk->sk_lock.wq, &wait);
1890 }
1891 
1892 static void __release_sock(struct sock *sk)
1893 	__releases(&sk->sk_lock.slock)
1894 	__acquires(&sk->sk_lock.slock)
1895 {
1896 	struct sk_buff *skb = sk->sk_backlog.head;
1897 
1898 	do {
1899 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1900 		bh_unlock_sock(sk);
1901 
1902 		do {
1903 			struct sk_buff *next = skb->next;
1904 
1905 			prefetch(next);
1906 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1907 			skb->next = NULL;
1908 			sk_backlog_rcv(sk, skb);
1909 
1910 			/*
1911 			 * We are in process context here with softirqs
1912 			 * disabled, use cond_resched_softirq() to preempt.
1913 			 * This is safe to do because we've taken the backlog
1914 			 * queue private:
1915 			 */
1916 			cond_resched_softirq();
1917 
1918 			skb = next;
1919 		} while (skb != NULL);
1920 
1921 		bh_lock_sock(sk);
1922 	} while ((skb = sk->sk_backlog.head) != NULL);
1923 
1924 	/*
1925 	 * Doing the zeroing here guarantees we cannot loop forever
1926 	 * while a wild producer attempts to flood us.
1927 	 */
1928 	sk->sk_backlog.len = 0;
1929 }
1930 
1931 /**
1932  * sk_wait_data - wait for data to arrive at sk_receive_queue
1933  * @sk:    sock to wait on
1934  * @timeo: for how long
1935  *
1936  * Now socket state including sk->sk_err is changed only under lock,
1937  * hence we may omit checks after joining wait queue.
1938  * We check receive queue before schedule() only as optimization;
1939  * it is very likely that release_sock() added new data.
1940  */
1941 int sk_wait_data(struct sock *sk, long *timeo)
1942 {
1943 	int rc;
1944 	DEFINE_WAIT(wait);
1945 
1946 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1947 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1948 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1949 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1950 	finish_wait(sk_sleep(sk), &wait);
1951 	return rc;
1952 }
1953 EXPORT_SYMBOL(sk_wait_data);
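
/*
 * Example usage (sketch): a typical blocking recvmsg() loop built on this
 * helper.  Error handling is abbreviated and the dequeue step is elided.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		if (signal_pending(current)) {
 *			err = sock_intr_errno(timeo);
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo);
 *	}
 *	... dequeue and copy a packet ...
 *	release_sock(sk);
 */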
1954 
1955 /**
1956  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1957  *	@sk: socket
1958  *	@size: memory size to allocate
1959  *	@kind: allocation type
1960  *
1961  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1962  *	rmem allocation. This function assumes that protocols which have
1963  *	memory_pressure use sk_wmem_queued for write buffer accounting.
1964  */
1965 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1966 {
1967 	struct proto *prot = sk->sk_prot;
1968 	int amt = sk_mem_pages(size);
1969 	long allocated;
1970 	int parent_status = UNDER_LIMIT;
1971 
1972 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1973 
1974 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1975 
1976 	/* Under limit. */
1977 	if (parent_status == UNDER_LIMIT &&
1978 			allocated <= sk_prot_mem_limits(sk, 0)) {
1979 		sk_leave_memory_pressure(sk);
1980 		return 1;
1981 	}
1982 
1983 	/* Under pressure. (we or our parents) */
1984 	if ((parent_status > SOFT_LIMIT) ||
1985 			allocated > sk_prot_mem_limits(sk, 1))
1986 		sk_enter_memory_pressure(sk);
1987 
1988 	/* Over hard limit (we or our parents) */
1989 	if ((parent_status == OVER_LIMIT) ||
1990 			(allocated > sk_prot_mem_limits(sk, 2)))
1991 		goto suppress_allocation;
1992 
1993 	/* guarantee minimum buffer size under pressure */
1994 	if (kind == SK_MEM_RECV) {
1995 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1996 			return 1;
1997 
1998 	} else { /* SK_MEM_SEND */
1999 		if (sk->sk_type == SOCK_STREAM) {
2000 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2001 				return 1;
2002 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2003 			   prot->sysctl_wmem[0])
2004 				return 1;
2005 	}
2006 
2007 	if (sk_has_memory_pressure(sk)) {
2008 		int alloc;
2009 
2010 		if (!sk_under_memory_pressure(sk))
2011 			return 1;
2012 		alloc = sk_sockets_allocated_read_positive(sk);
2013 		if (sk_prot_mem_limits(sk, 2) > alloc *
2014 		    sk_mem_pages(sk->sk_wmem_queued +
2015 				 atomic_read(&sk->sk_rmem_alloc) +
2016 				 sk->sk_forward_alloc))
2017 			return 1;
2018 	}
2019 
2020 suppress_allocation:
2021 
2022 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2023 		sk_stream_moderate_sndbuf(sk);
2024 
2025 		/* Fail only if socket is _under_ its sndbuf.
2026 		 * In this case we cannot block, so we have to fail.
2027 		 */
2028 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2029 			return 1;
2030 	}
2031 
2032 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2033 
2034 	/* Alas. Undo changes. */
2035 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2036 
2037 	sk_memory_allocated_sub(sk, amt);
2038 
2039 	return 0;
2040 }
2041 EXPORT_SYMBOL(__sk_mem_schedule);
2042 
2043 /**
2044  *	__sk_mem_reclaim - reclaim memory_allocated
2045  *	@sk: socket
2046  */
2047 void __sk_mem_reclaim(struct sock *sk)
2048 {
2049 	sk_memory_allocated_sub(sk,
2050 				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2051 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2052 
2053 	if (sk_under_memory_pressure(sk) &&
2054 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2055 		sk_leave_memory_pressure(sk);
2056 }
2057 EXPORT_SYMBOL(__sk_mem_reclaim);
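
/*
 * Protocols normally reach these through the inline wrappers in
 * <net/sock.h> rather than calling them directly.  A rough sketch of
 * charging an incoming skb against receive memory:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		goto drop;			(no quota available)
 *	sk_mem_charge(sk, skb->truesize);
 *	...
 *	sk_mem_uncharge(sk, skb->truesize);	(when the skb is consumed)
 *	sk_mem_reclaim(sk);			(return whole quanta when idle)
 */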
2058 
2059 
2060 /*
2061  * Set of default routines for initialising struct proto_ops when
2062  * the protocol does not support a particular function. In certain
2063  * cases where it makes no sense for a protocol to have a "do nothing"
2064  * function, some default processing is provided.
2065  */
2066 
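/*
 * Example (sketch): a protocol that implements only a subset of the socket
 * calls can point the remaining proto_ops members at these stubs.  The
 * example_* names and PF_EXAMPLE are hypothetical.
 *
 *	static const struct proto_ops example_proto_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.release	= example_release,
 *		.sendmsg	= example_sendmsg,
 *		.recvmsg	= example_recvmsg,
 *		.poll		= datagram_poll,
 *		.bind		= sock_no_bind,
 *		.connect	= sock_no_connect,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */
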
2067 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2068 {
2069 	return -EOPNOTSUPP;
2070 }
2071 EXPORT_SYMBOL(sock_no_bind);
2072 
2073 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2074 		    int len, int flags)
2075 {
2076 	return -EOPNOTSUPP;
2077 }
2078 EXPORT_SYMBOL(sock_no_connect);
2079 
2080 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2081 {
2082 	return -EOPNOTSUPP;
2083 }
2084 EXPORT_SYMBOL(sock_no_socketpair);
2085 
2086 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2087 {
2088 	return -EOPNOTSUPP;
2089 }
2090 EXPORT_SYMBOL(sock_no_accept);
2091 
2092 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2093 		    int *len, int peer)
2094 {
2095 	return -EOPNOTSUPP;
2096 }
2097 EXPORT_SYMBOL(sock_no_getname);
2098 
2099 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2100 {
2101 	return 0;
2102 }
2103 EXPORT_SYMBOL(sock_no_poll);
2104 
2105 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2106 {
2107 	return -EOPNOTSUPP;
2108 }
2109 EXPORT_SYMBOL(sock_no_ioctl);
2110 
2111 int sock_no_listen(struct socket *sock, int backlog)
2112 {
2113 	return -EOPNOTSUPP;
2114 }
2115 EXPORT_SYMBOL(sock_no_listen);
2116 
2117 int sock_no_shutdown(struct socket *sock, int how)
2118 {
2119 	return -EOPNOTSUPP;
2120 }
2121 EXPORT_SYMBOL(sock_no_shutdown);
2122 
2123 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2124 		    char __user *optval, unsigned int optlen)
2125 {
2126 	return -EOPNOTSUPP;
2127 }
2128 EXPORT_SYMBOL(sock_no_setsockopt);
2129 
2130 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2131 		    char __user *optval, int __user *optlen)
2132 {
2133 	return -EOPNOTSUPP;
2134 }
2135 EXPORT_SYMBOL(sock_no_getsockopt);
2136 
2137 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2138 		    size_t len)
2139 {
2140 	return -EOPNOTSUPP;
2141 }
2142 EXPORT_SYMBOL(sock_no_sendmsg);
2143 
2144 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2145 		    size_t len, int flags)
2146 {
2147 	return -EOPNOTSUPP;
2148 }
2149 EXPORT_SYMBOL(sock_no_recvmsg);
2150 
2151 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2152 {
2153 	/* Mirror missing mmap method error code */
2154 	return -ENODEV;
2155 }
2156 EXPORT_SYMBOL(sock_no_mmap);
2157 
2158 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2159 {
2160 	ssize_t res;
2161 	struct msghdr msg = {.msg_flags = flags};
2162 	struct kvec iov;
2163 	char *kaddr = kmap(page);
2164 	iov.iov_base = kaddr + offset;
2165 	iov.iov_len = size;
2166 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2167 	kunmap(page);
2168 	return res;
2169 }
2170 EXPORT_SYMBOL(sock_no_sendpage);
2171 
2172 /*
2173  *	Default Socket Callbacks
2174  */
2175 
2176 static void sock_def_wakeup(struct sock *sk)
2177 {
2178 	struct socket_wq *wq;
2179 
2180 	rcu_read_lock();
2181 	wq = rcu_dereference(sk->sk_wq);
2182 	if (wq_has_sleeper(wq))
2183 		wake_up_interruptible_all(&wq->wait);
2184 	rcu_read_unlock();
2185 }
2186 
2187 static void sock_def_error_report(struct sock *sk)
2188 {
2189 	struct socket_wq *wq;
2190 
2191 	rcu_read_lock();
2192 	wq = rcu_dereference(sk->sk_wq);
2193 	if (wq_has_sleeper(wq))
2194 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2195 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2196 	rcu_read_unlock();
2197 }
2198 
2199 static void sock_def_readable(struct sock *sk, int len)
2200 {
2201 	struct socket_wq *wq;
2202 
2203 	rcu_read_lock();
2204 	wq = rcu_dereference(sk->sk_wq);
2205 	if (wq_has_sleeper(wq))
2206 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2207 						POLLRDNORM | POLLRDBAND);
2208 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2209 	rcu_read_unlock();
2210 }
2211 
2212 static void sock_def_write_space(struct sock *sk)
2213 {
2214 	struct socket_wq *wq;
2215 
2216 	rcu_read_lock();
2217 
2218 	/* Do not wake up a writer until he can make "significant"
2219 	 * progress.  --DaveM
2220 	 */
2221 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2222 		wq = rcu_dereference(sk->sk_wq);
2223 		if (wq_has_sleeper(wq))
2224 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2225 						POLLWRNORM | POLLWRBAND);
2226 
2227 		/* Should agree with poll, otherwise some programs break */
2228 		if (sock_writeable(sk))
2229 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2230 	}
2231 
2232 	rcu_read_unlock();
2233 }
2234 
2235 static void sock_def_destruct(struct sock *sk)
2236 {
2237 	kfree(sk->sk_protinfo);
2238 }
2239 
2240 void sk_send_sigurg(struct sock *sk)
2241 {
2242 	if (sk->sk_socket && sk->sk_socket->file)
2243 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2244 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2245 }
2246 EXPORT_SYMBOL(sk_send_sigurg);
2247 
2248 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2249 		    unsigned long expires)
2250 {
2251 	if (!mod_timer(timer, expires))
2252 		sock_hold(sk);
2253 }
2254 EXPORT_SYMBOL(sk_reset_timer);
2255 
2256 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2257 {
2258 	if (del_timer(timer))
2259 		__sock_put(sk);
2260 }
2261 EXPORT_SYMBOL(sk_stop_timer);
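
/*
 * Example (sketch): how a protocol timer usually pairs with these helpers.
 * sk_reset_timer() takes a socket reference when it arms the timer, so the
 * handler drops one when it is done; example_timer_handler is hypothetical.
 *
 *	static void example_timer_handler(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *
 *		bh_lock_sock(sk);
 *		... timer work ...
 *		bh_unlock_sock(sk);
 *		sock_put(sk);		(balances sk_reset_timer())
 *	}
 *
 *	setup_timer(&sk->sk_timer, example_timer_handler, (unsigned long)sk);
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);	(drops the ref if still armed)
 */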
2262 
2263 void sock_init_data(struct socket *sock, struct sock *sk)
2264 {
2265 	skb_queue_head_init(&sk->sk_receive_queue);
2266 	skb_queue_head_init(&sk->sk_write_queue);
2267 	skb_queue_head_init(&sk->sk_error_queue);
2268 #ifdef CONFIG_NET_DMA
2269 	skb_queue_head_init(&sk->sk_async_wait_queue);
2270 #endif
2271 
2272 	sk->sk_send_head	=	NULL;
2273 
2274 	init_timer(&sk->sk_timer);
2275 
2276 	sk->sk_allocation	=	GFP_KERNEL;
2277 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2278 	sk->sk_sndbuf		=	sysctl_wmem_default;
2279 	sk->sk_state		=	TCP_CLOSE;
2280 	sk_set_socket(sk, sock);
2281 
2282 	sock_set_flag(sk, SOCK_ZAPPED);
2283 
2284 	if (sock) {
2285 		sk->sk_type	=	sock->type;
2286 		sk->sk_wq	=	sock->wq;
2287 		sock->sk	=	sk;
2288 	} else
2289 		sk->sk_wq	=	NULL;
2290 
2291 	spin_lock_init(&sk->sk_dst_lock);
2292 	rwlock_init(&sk->sk_callback_lock);
2293 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2294 			af_callback_keys + sk->sk_family,
2295 			af_family_clock_key_strings[sk->sk_family]);
2296 
2297 	sk->sk_state_change	=	sock_def_wakeup;
2298 	sk->sk_data_ready	=	sock_def_readable;
2299 	sk->sk_write_space	=	sock_def_write_space;
2300 	sk->sk_error_report	=	sock_def_error_report;
2301 	sk->sk_destruct		=	sock_def_destruct;
2302 
2303 	sk->sk_frag.page	=	NULL;
2304 	sk->sk_frag.offset	=	0;
2305 	sk->sk_peek_off		=	-1;
2306 
2307 	sk->sk_peer_pid 	=	NULL;
2308 	sk->sk_peer_cred	=	NULL;
2309 	sk->sk_write_pending	=	0;
2310 	sk->sk_rcvlowat		=	1;
2311 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2312 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2313 
2314 	sk->sk_stamp = ktime_set(-1L, 0);
2315 
2316 #ifdef CONFIG_NET_RX_BUSY_POLL
2317 	sk->sk_napi_id		=	0;
2318 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2319 #endif
2320 
2321 	sk->sk_max_pacing_rate = ~0U;
2322 	sk->sk_pacing_rate = ~0U;
2323 	/*
2324 	 * Before updating sk_refcnt, we must commit prior changes to memory
2325 	 * (see Documentation/RCU/rculist_nulls.txt for details)
2326 	 */
2327 	smp_wmb();
2328 	atomic_set(&sk->sk_refcnt, 1);
2329 	atomic_set(&sk->sk_drops, 0);
2330 }
2331 EXPORT_SYMBOL(sock_init_data);
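
/*
 * Example (sketch): the usual call site is a net_proto_family ->create()
 * handler, which allocates the sock and lets sock_init_data() set the
 * defaults.  PF_EXAMPLE and example_proto are hypothetical.
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOMEM;
 *	sock_init_data(sock, sk);
 *	sk->sk_protocol = protocol;
 *	... protocol-specific initialisation ...
 */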
2332 
2333 void lock_sock_nested(struct sock *sk, int subclass)
2334 {
2335 	might_sleep();
2336 	spin_lock_bh(&sk->sk_lock.slock);
2337 	if (sk->sk_lock.owned)
2338 		__lock_sock(sk);
2339 	sk->sk_lock.owned = 1;
2340 	spin_unlock(&sk->sk_lock.slock);
2341 	/*
2342 	 * The sk_lock has mutex_lock() semantics here:
2343 	 */
2344 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2345 	local_bh_enable();
2346 }
2347 EXPORT_SYMBOL(lock_sock_nested);
2348 
2349 void release_sock(struct sock *sk)
2350 {
2351 	/*
2352 	 * The sk_lock has mutex_unlock() semantics:
2353 	 */
2354 	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2355 
2356 	spin_lock_bh(&sk->sk_lock.slock);
2357 	if (sk->sk_backlog.tail)
2358 		__release_sock(sk);
2359 
2360 	if (sk->sk_prot->release_cb)
2361 		sk->sk_prot->release_cb(sk);
2362 
2363 	sk->sk_lock.owned = 0;
2364 	if (waitqueue_active(&sk->sk_lock.wq))
2365 		wake_up(&sk->sk_lock.wq);
2366 	spin_unlock_bh(&sk->sk_lock.slock);
2367 }
2368 EXPORT_SYMBOL(release_sock);
2369 
2370 /**
2371  * lock_sock_fast - fast version of lock_sock
2372  * @sk: socket
2373  *
2374  * This version should be used for very small critical sections, where the process won't block.
2375  * Returns false if the fast path is taken:
2376  *   sk_lock.slock locked, owned = 0, BH disabled
2377  * Returns true if the slow path is taken:
2378  *   sk_lock.slock unlocked, owned = 1, BH enabled
2379  */
2380 bool lock_sock_fast(struct sock *sk)
2381 {
2382 	might_sleep();
2383 	spin_lock_bh(&sk->sk_lock.slock);
2384 
2385 	if (!sk->sk_lock.owned)
2386 		/*
2387 		 * Note: the fast path returns with BH still disabled
2388 		 */
2389 		return false;
2390 
2391 	__lock_sock(sk);
2392 	sk->sk_lock.owned = 1;
2393 	spin_unlock(&sk->sk_lock.slock);
2394 	/*
2395 	 * The sk_lock has mutex_lock() semantics here:
2396 	 */
2397 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2398 	local_bh_enable();
2399 	return true;
2400 }
2401 EXPORT_SYMBOL(lock_sock_fast);
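
/*
 * Typical pairing (sketch): unlock_sock_fast() in <net/sock.h> undoes
 * whichever path was taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-blocking work on the socket ...
 *	unlock_sock_fast(sk, slow);
 */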
2402 
2403 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2404 {
2405 	struct timeval tv;
2406 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2407 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2408 	tv = ktime_to_timeval(sk->sk_stamp);
2409 	if (tv.tv_sec == -1)
2410 		return -ENOENT;
2411 	if (tv.tv_sec == 0) {
2412 		sk->sk_stamp = ktime_get_real();
2413 		tv = ktime_to_timeval(sk->sk_stamp);
2414 	}
2415 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2416 }
2417 EXPORT_SYMBOL(sock_get_timestamp);
2418 
2419 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2420 {
2421 	struct timespec ts;
2422 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2423 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2424 	ts = ktime_to_timespec(sk->sk_stamp);
2425 	if (ts.tv_sec == -1)
2426 		return -ENOENT;
2427 	if (ts.tv_sec == 0) {
2428 		sk->sk_stamp = ktime_get_real();
2429 		ts = ktime_to_timespec(sk->sk_stamp);
2430 	}
2431 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2432 }
2433 EXPORT_SYMBOL(sock_get_timestampns);
2434 
2435 void sock_enable_timestamp(struct sock *sk, int flag)
2436 {
2437 	if (!sock_flag(sk, flag)) {
2438 		unsigned long previous_flags = sk->sk_flags;
2439 
2440 		sock_set_flag(sk, flag);
2441 		/*
2442 		 * We just set one of the two flags that require net
2443 		 * time stamping, but time stamping might already have
2444 		 * been on because of the other one.
2445 		 */
2446 		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2447 			net_enable_timestamp();
2448 	}
2449 }
2450 
2451 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2452 		       int level, int type)
2453 {
2454 	struct sock_exterr_skb *serr;
2455 	struct sk_buff *skb, *skb2;
2456 	int copied, err;
2457 
2458 	err = -EAGAIN;
2459 	skb = skb_dequeue(&sk->sk_error_queue);
2460 	if (skb == NULL)
2461 		goto out;
2462 
2463 	copied = skb->len;
2464 	if (copied > len) {
2465 		msg->msg_flags |= MSG_TRUNC;
2466 		copied = len;
2467 	}
2468 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2469 	if (err)
2470 		goto out_free_skb;
2471 
2472 	sock_recv_timestamp(msg, sk, skb);
2473 
2474 	serr = SKB_EXT_ERR(skb);
2475 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2476 
2477 	msg->msg_flags |= MSG_ERRQUEUE;
2478 	err = copied;
2479 
2480 	/* Reset and regenerate socket error */
2481 	spin_lock_bh(&sk->sk_error_queue.lock);
2482 	sk->sk_err = 0;
2483 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2484 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2485 		spin_unlock_bh(&sk->sk_error_queue.lock);
2486 		sk->sk_error_report(sk);
2487 	} else
2488 		spin_unlock_bh(&sk->sk_error_queue.lock);
2489 
2490 out_free_skb:
2491 	kfree_skb(skb);
2492 out:
2493 	return err;
2494 }
2495 EXPORT_SYMBOL(sock_recv_errqueue);
2496 
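/*
 * Example (sketch): a recvmsg() implementation can delegate MSG_ERRQUEUE
 * handling to this helper.  SOL_EXAMPLE/EXAMPLE_RECVERR stand in for the
 * cmsg level/type the protocol wants the extended error delivered under.
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_EXAMPLE, EXAMPLE_RECVERR);
 */
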
2497 /*
2498  *	Get a socket option on a socket.
2499  *
2500  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2501  *	asynchronous errors should be reported by getsockopt. We assume
2502  *	this means if you specify SO_ERROR (otherwise what's the point of it?).
2503  */
2504 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2505 			   char __user *optval, int __user *optlen)
2506 {
2507 	struct sock *sk = sock->sk;
2508 
2509 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2510 }
2511 EXPORT_SYMBOL(sock_common_getsockopt);
2512 
2513 #ifdef CONFIG_COMPAT
2514 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2515 				  char __user *optval, int __user *optlen)
2516 {
2517 	struct sock *sk = sock->sk;
2518 
2519 	if (sk->sk_prot->compat_getsockopt != NULL)
2520 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2521 						      optval, optlen);
2522 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2523 }
2524 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2525 #endif
2526 
2527 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2528 			struct msghdr *msg, size_t size, int flags)
2529 {
2530 	struct sock *sk = sock->sk;
2531 	int addr_len = 0;
2532 	int err;
2533 
2534 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2535 				   flags & ~MSG_DONTWAIT, &addr_len);
2536 	if (err >= 0)
2537 		msg->msg_namelen = addr_len;
2538 	return err;
2539 }
2540 EXPORT_SYMBOL(sock_common_recvmsg);
2541 
2542 /*
2543  *	Set socket options on an inet socket.
2544  */
2545 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2546 			   char __user *optval, unsigned int optlen)
2547 {
2548 	struct sock *sk = sock->sk;
2549 
2550 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2551 }
2552 EXPORT_SYMBOL(sock_common_setsockopt);
2553 
2554 #ifdef CONFIG_COMPAT
2555 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2556 				  char __user *optval, unsigned int optlen)
2557 {
2558 	struct sock *sk = sock->sk;
2559 
2560 	if (sk->sk_prot->compat_setsockopt != NULL)
2561 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2562 						      optval, optlen);
2563 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2564 }
2565 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2566 #endif
2567 
2568 void sk_common_release(struct sock *sk)
2569 {
2570 	if (sk->sk_prot->destroy)
2571 		sk->sk_prot->destroy(sk);
2572 
2573 	/*
2574 	 * Observation: when sk_common_release is called, processes have
2575 	 * no access to the socket, but the network stack still does.
2576 	 * Step one, detach it from networking:
2577 	 *
2578 	 * A. Remove from hash tables.
2579 	 */
2580 
2581 	sk->sk_prot->unhash(sk);
2582 
2583 	/*
2584 	 * At this point the socket cannot receive new packets, but it is possible
2585 	 * that some packets are in flight because some CPU runs the receiver and
2586 	 * did the hash table lookup before we unhashed the socket. They will reach
2587 	 * the receive queue and will be purged by the socket destructor.
2588 	 *
2589 	 * Also we still have packets pending on the receive queue and, probably,
2590 	 * our own packets waiting in device queues. sock_destroy will drain the
2591 	 * receive queue, but transmitted packets will delay socket destruction
2592 	 * until the last reference is released.
2593 	 */
2594 
2595 	sock_orphan(sk);
2596 
2597 	xfrm_sk_free_policy(sk);
2598 
2599 	sk_refcnt_debug_release(sk);
2600 
2601 	if (sk->sk_frag.page) {
2602 		put_page(sk->sk_frag.page);
2603 		sk->sk_frag.page = NULL;
2604 	}
2605 
2606 	sock_put(sk);
2607 }
2608 EXPORT_SYMBOL(sk_common_release);
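
/*
 * Example (sketch): datagram-style protocols often implement their
 * struct proto ->close() as a thin wrapper around this helper;
 * example_close is hypothetical.
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 */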
2609 
2610 #ifdef CONFIG_PROC_FS
2611 #define PROTO_INUSE_NR	64	/* should be enough for now */
2612 struct prot_inuse {
2613 	int val[PROTO_INUSE_NR];
2614 };
2615 
2616 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2617 
2618 #ifdef CONFIG_NET_NS
2619 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2620 {
2621 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2622 }
2623 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2624 
2625 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2626 {
2627 	int cpu, idx = prot->inuse_idx;
2628 	int res = 0;
2629 
2630 	for_each_possible_cpu(cpu)
2631 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2632 
2633 	return res >= 0 ? res : 0;
2634 }
2635 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2636 
2637 static int __net_init sock_inuse_init_net(struct net *net)
2638 {
2639 	net->core.inuse = alloc_percpu(struct prot_inuse);
2640 	return net->core.inuse ? 0 : -ENOMEM;
2641 }
2642 
2643 static void __net_exit sock_inuse_exit_net(struct net *net)
2644 {
2645 	free_percpu(net->core.inuse);
2646 }
2647 
2648 static struct pernet_operations net_inuse_ops = {
2649 	.init = sock_inuse_init_net,
2650 	.exit = sock_inuse_exit_net,
2651 };
2652 
2653 static __init int net_inuse_init(void)
2654 {
2655 	if (register_pernet_subsys(&net_inuse_ops))
2656 		panic("Cannot initialize net inuse counters");
2657 
2658 	return 0;
2659 }
2660 
2661 core_initcall(net_inuse_init);
2662 #else
2663 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2664 
2665 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2666 {
2667 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2668 }
2669 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2670 
2671 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2672 {
2673 	int cpu, idx = prot->inuse_idx;
2674 	int res = 0;
2675 
2676 	for_each_possible_cpu(cpu)
2677 		res += per_cpu(prot_inuse, cpu).val[idx];
2678 
2679 	return res >= 0 ? res : 0;
2680 }
2681 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2682 #endif
2683 
2684 static void assign_proto_idx(struct proto *prot)
2685 {
2686 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2687 
2688 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2689 		pr_err("PROTO_INUSE_NR exhausted\n");
2690 		return;
2691 	}
2692 
2693 	set_bit(prot->inuse_idx, proto_inuse_idx);
2694 }
2695 
2696 static void release_proto_idx(struct proto *prot)
2697 {
2698 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2699 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2700 }
2701 #else
2702 static inline void assign_proto_idx(struct proto *prot)
2703 {
2704 }
2705 
2706 static inline void release_proto_idx(struct proto *prot)
2707 {
2708 }
2709 #endif
2710 
2711 int proto_register(struct proto *prot, int alloc_slab)
2712 {
2713 	if (alloc_slab) {
2714 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2715 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2716 					NULL);
2717 
2718 		if (prot->slab == NULL) {
2719 			pr_crit("%s: Can't create sock SLAB cache!\n",
2720 				prot->name);
2721 			goto out;
2722 		}
2723 
2724 		if (prot->rsk_prot != NULL) {
2725 			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2726 			if (prot->rsk_prot->slab_name == NULL)
2727 				goto out_free_sock_slab;
2728 
2729 			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2730 								 prot->rsk_prot->obj_size, 0,
2731 								 SLAB_HWCACHE_ALIGN, NULL);
2732 
2733 			if (prot->rsk_prot->slab == NULL) {
2734 				pr_crit("%s: Can't create request sock SLAB cache!\n",
2735 					prot->name);
2736 				goto out_free_request_sock_slab_name;
2737 			}
2738 		}
2739 
2740 		if (prot->twsk_prot != NULL) {
2741 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2742 
2743 			if (prot->twsk_prot->twsk_slab_name == NULL)
2744 				goto out_free_request_sock_slab;
2745 
2746 			prot->twsk_prot->twsk_slab =
2747 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2748 						  prot->twsk_prot->twsk_obj_size,
2749 						  0,
2750 						  SLAB_HWCACHE_ALIGN |
2751 							prot->slab_flags,
2752 						  NULL);
2753 			if (prot->twsk_prot->twsk_slab == NULL)
2754 				goto out_free_timewait_sock_slab_name;
2755 		}
2756 	}
2757 
2758 	mutex_lock(&proto_list_mutex);
2759 	list_add(&prot->node, &proto_list);
2760 	assign_proto_idx(prot);
2761 	mutex_unlock(&proto_list_mutex);
2762 	return 0;
2763 
2764 out_free_timewait_sock_slab_name:
2765 	kfree(prot->twsk_prot->twsk_slab_name);
2766 out_free_request_sock_slab:
2767 	if (prot->rsk_prot && prot->rsk_prot->slab) {
2768 		kmem_cache_destroy(prot->rsk_prot->slab);
2769 		prot->rsk_prot->slab = NULL;
2770 	}
2771 out_free_request_sock_slab_name:
2772 	if (prot->rsk_prot)
2773 		kfree(prot->rsk_prot->slab_name);
2774 out_free_sock_slab:
2775 	kmem_cache_destroy(prot->slab);
2776 	prot->slab = NULL;
2777 out:
2778 	return -ENOBUFS;
2779 }
2780 EXPORT_SYMBOL(proto_register);
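
/*
 * Example (sketch): module init/exit for a protocol using this
 * registration API.  example_proto and struct example_sock are
 * hypothetical.
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 */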
2781 
2782 void proto_unregister(struct proto *prot)
2783 {
2784 	mutex_lock(&proto_list_mutex);
2785 	release_proto_idx(prot);
2786 	list_del(&prot->node);
2787 	mutex_unlock(&proto_list_mutex);
2788 
2789 	if (prot->slab != NULL) {
2790 		kmem_cache_destroy(prot->slab);
2791 		prot->slab = NULL;
2792 	}
2793 
2794 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2795 		kmem_cache_destroy(prot->rsk_prot->slab);
2796 		kfree(prot->rsk_prot->slab_name);
2797 		prot->rsk_prot->slab = NULL;
2798 	}
2799 
2800 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2801 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2802 		kfree(prot->twsk_prot->twsk_slab_name);
2803 		prot->twsk_prot->twsk_slab = NULL;
2804 	}
2805 }
2806 EXPORT_SYMBOL(proto_unregister);
2807 
2808 #ifdef CONFIG_PROC_FS
2809 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2810 	__acquires(proto_list_mutex)
2811 {
2812 	mutex_lock(&proto_list_mutex);
2813 	return seq_list_start_head(&proto_list, *pos);
2814 }
2815 
2816 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2817 {
2818 	return seq_list_next(v, &proto_list, pos);
2819 }
2820 
2821 static void proto_seq_stop(struct seq_file *seq, void *v)
2822 	__releases(proto_list_mutex)
2823 {
2824 	mutex_unlock(&proto_list_mutex);
2825 }
2826 
2827 static char proto_method_implemented(const void *method)
2828 {
2829 	return method == NULL ? 'n' : 'y';
2830 }
2831 static long sock_prot_memory_allocated(struct proto *proto)
2832 {
2833 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2834 }
2835 
2836 static char *sock_prot_memory_pressure(struct proto *proto)
2837 {
2838 	return proto->memory_pressure != NULL ?
2839 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2840 }
2841 
2842 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2843 {
2844 
2845 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2846 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2847 		   proto->name,
2848 		   proto->obj_size,
2849 		   sock_prot_inuse_get(seq_file_net(seq), proto),
2850 		   sock_prot_memory_allocated(proto),
2851 		   sock_prot_memory_pressure(proto),
2852 		   proto->max_header,
2853 		   proto->slab == NULL ? "no" : "yes",
2854 		   module_name(proto->owner),
2855 		   proto_method_implemented(proto->close),
2856 		   proto_method_implemented(proto->connect),
2857 		   proto_method_implemented(proto->disconnect),
2858 		   proto_method_implemented(proto->accept),
2859 		   proto_method_implemented(proto->ioctl),
2860 		   proto_method_implemented(proto->init),
2861 		   proto_method_implemented(proto->destroy),
2862 		   proto_method_implemented(proto->shutdown),
2863 		   proto_method_implemented(proto->setsockopt),
2864 		   proto_method_implemented(proto->getsockopt),
2865 		   proto_method_implemented(proto->sendmsg),
2866 		   proto_method_implemented(proto->recvmsg),
2867 		   proto_method_implemented(proto->sendpage),
2868 		   proto_method_implemented(proto->bind),
2869 		   proto_method_implemented(proto->backlog_rcv),
2870 		   proto_method_implemented(proto->hash),
2871 		   proto_method_implemented(proto->unhash),
2872 		   proto_method_implemented(proto->get_port),
2873 		   proto_method_implemented(proto->enter_memory_pressure));
2874 }
2875 
2876 static int proto_seq_show(struct seq_file *seq, void *v)
2877 {
2878 	if (v == &proto_list)
2879 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2880 			   "protocol",
2881 			   "size",
2882 			   "sockets",
2883 			   "memory",
2884 			   "press",
2885 			   "maxhdr",
2886 			   "slab",
2887 			   "module",
2888 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2889 	else
2890 		proto_seq_printf(seq, list_entry(v, struct proto, node));
2891 	return 0;
2892 }
2893 
2894 static const struct seq_operations proto_seq_ops = {
2895 	.start  = proto_seq_start,
2896 	.next   = proto_seq_next,
2897 	.stop   = proto_seq_stop,
2898 	.show   = proto_seq_show,
2899 };
2900 
2901 static int proto_seq_open(struct inode *inode, struct file *file)
2902 {
2903 	return seq_open_net(inode, file, &proto_seq_ops,
2904 			    sizeof(struct seq_net_private));
2905 }
2906 
2907 static const struct file_operations proto_seq_fops = {
2908 	.owner		= THIS_MODULE,
2909 	.open		= proto_seq_open,
2910 	.read		= seq_read,
2911 	.llseek		= seq_lseek,
2912 	.release	= seq_release_net,
2913 };
2914 
2915 static __net_init int proto_init_net(struct net *net)
2916 {
2917 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2918 		return -ENOMEM;
2919 
2920 	return 0;
2921 }
2922 
2923 static __net_exit void proto_exit_net(struct net *net)
2924 {
2925 	remove_proc_entry("protocols", net->proc_net);
2926 }
2927 
2928 
2929 static __net_initdata struct pernet_operations proto_net_ops = {
2930 	.init = proto_init_net,
2931 	.exit = proto_exit_net,
2932 };
2933 
2934 static int __init proto_init(void)
2935 {
2936 	return register_pernet_subsys(&proto_net_ops);
2937 }
2938 
2939 subsys_initcall(proto_init);
2940 
2941 #endif /* PROC_FS */
2942