xref: /openbmc/linux/net/core/sock.c (revision 275876e2)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93 
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
120 
121 #include <asm/uaccess.h>
122 
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 
135 #include <linux/filter.h>
136 
137 #include <trace/events/sock.h>
138 
139 #ifdef CONFIG_INET
140 #include <net/tcp.h>
141 #endif
142 
143 #include <net/busy_poll.h>
144 
145 static DEFINE_MUTEX(proto_list_mutex);
146 static LIST_HEAD(proto_list);
147 
148 /**
149  * sk_ns_capable - General socket capability test
150  * @sk: Socket to use a capability on or through
151  * @user_ns: The user namespace of the capability to use
152  * @cap: The capability to use
153  *
154  * Test to see if the opener of the socket had when the socket was
155  * created and the current process has the capability @cap in the user
156  * namespace @user_ns.
157  */
158 bool sk_ns_capable(const struct sock *sk,
159 		   struct user_namespace *user_ns, int cap)
160 {
161 	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
162 		ns_capable(user_ns, cap);
163 }
164 EXPORT_SYMBOL(sk_ns_capable);
165 
166 /**
167  * sk_capable - Socket global capability test
168  * @sk: Socket to use a capability on or through
169  * @cap: The global capability to use
170  *
171  * Test to see if the opener of the socket had the capability @cap
172  * when the socket was created, and whether the current process has
173  * it in all user namespaces.
174  */
175 bool sk_capable(const struct sock *sk, int cap)
176 {
177 	return sk_ns_capable(sk, &init_user_ns, cap);
178 }
179 EXPORT_SYMBOL(sk_capable);
180 
181 /**
182  * sk_net_capable - Network namespace socket capability test
183  * @sk: Socket to use a capability on or through
184  * @cap: The capability to use
185  *
186  * Test to see if the opener of the socket had the capability @cap when the
187  * socket was created, and whether the current process has it over the
188  * network namespace the socket is a member of.
189  */
190 bool sk_net_capable(const struct sock *sk, int cap)
191 {
192 	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
193 }
194 EXPORT_SYMBOL(sk_net_capable);
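/*
 * Caller-side sketch of the three helpers above (illustrative, not a
 * quote from any particular protocol): a privileged request handler
 * might do
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * which succeeds only if the socket's opener had CAP_NET_ADMIN when the
 * socket was created _and_ the current task has it over the socket's
 * network namespace.
 */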
195 
196 
197 #ifdef CONFIG_MEMCG_KMEM
198 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
199 {
200 	struct proto *proto;
201 	int ret = 0;
202 
203 	mutex_lock(&proto_list_mutex);
204 	list_for_each_entry(proto, &proto_list, node) {
205 		if (proto->init_cgroup) {
206 			ret = proto->init_cgroup(memcg, ss);
207 			if (ret)
208 				goto out;
209 		}
210 	}
211 
212 	mutex_unlock(&proto_list_mutex);
213 	return ret;
214 out:
215 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
216 		if (proto->destroy_cgroup)
217 			proto->destroy_cgroup(memcg);
218 	mutex_unlock(&proto_list_mutex);
219 	return ret;
220 }
221 
222 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
223 {
224 	struct proto *proto;
225 
226 	mutex_lock(&proto_list_mutex);
227 	list_for_each_entry_reverse(proto, &proto_list, node)
228 		if (proto->destroy_cgroup)
229 			proto->destroy_cgroup(memcg);
230 	mutex_unlock(&proto_list_mutex);
231 }
232 #endif
233 
234 /*
235  * Each address family might have different locking rules, so we have
236  * one slock key per address family:
237  */
238 static struct lock_class_key af_family_keys[AF_MAX];
239 static struct lock_class_key af_family_slock_keys[AF_MAX];
240 
241 #if defined(CONFIG_MEMCG_KMEM)
242 struct static_key memcg_socket_limit_enabled;
243 EXPORT_SYMBOL(memcg_socket_limit_enabled);
244 #endif
245 
246 /*
247  * Make lock validator output more readable. (we pre-construct these
248  * strings at build time, so that runtime initialization of socket
249  * locks is fast):
250  */
251 static const char *const af_family_key_strings[AF_MAX+1] = {
252   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
253   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
254   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
255   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
256   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
257   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
258   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
259   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
260   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
261   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
262   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
263   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
264   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
265   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
266 };
267 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
268   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
269   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
270   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
271   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
272   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
273   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
274   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
275   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
276   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
277   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
278   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
279   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
280   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
281   "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
282 };
283 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
284   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
285   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
286   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
287   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
288   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
289   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
290   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
291   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
292   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
293   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
294   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
295   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
296   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
297   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
298 };
299 
300 /*
301  * sk_callback_lock locking rules are per-address-family,
302  * so split the lock classes by using a per-AF key:
303  */
304 static struct lock_class_key af_callback_keys[AF_MAX];
305 
306 /* Take the size of the struct sk_buff overhead into consideration when
307  * determining these values, since that is non-constant across
308  * platforms.  This keeps socket queueing behavior and performance
309  * independent of such differences.
310  */
311 #define _SK_MEM_PACKETS		256
312 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
313 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
314 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
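/*
 * Rough worked example (illustrative numbers; the exact truesize depends
 * on the architecture and config): if SKB_TRUESIZE(256) comes out at
 * about 768 bytes on a 64-bit build, then
 *
 *	SK_WMEM_MAX = SK_RMEM_MAX = 768 * 256 = 196608	(~192 KiB)
 *
 * which would then be the boot-time default reported by the
 * /proc/sys/net/core/wmem_max and rmem_max sysctls below.
 */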
315 
316 /* Run time adjustable parameters. */
317 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
318 EXPORT_SYMBOL(sysctl_wmem_max);
319 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
320 EXPORT_SYMBOL(sysctl_rmem_max);
321 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
322 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
323 
324 /* Maximal space eaten by iovec or ancillary data plus some space */
325 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
326 EXPORT_SYMBOL(sysctl_optmem_max);
327 
328 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
329 EXPORT_SYMBOL_GPL(memalloc_socks);
330 
331 /**
332  * sk_set_memalloc - sets %SOCK_MEMALLOC
333  * @sk: socket to set it on
334  *
335  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
336  * It's the responsibility of the admin to adjust min_free_kbytes
337  * to meet the requirements.
338  */
339 void sk_set_memalloc(struct sock *sk)
340 {
341 	sock_set_flag(sk, SOCK_MEMALLOC);
342 	sk->sk_allocation |= __GFP_MEMALLOC;
343 	static_key_slow_inc(&memalloc_socks);
344 }
345 EXPORT_SYMBOL_GPL(sk_set_memalloc);
346 
347 void sk_clear_memalloc(struct sock *sk)
348 {
349 	sock_reset_flag(sk, SOCK_MEMALLOC);
350 	sk->sk_allocation &= ~__GFP_MEMALLOC;
351 	static_key_slow_dec(&memalloc_socks);
352 
353 	/*
354 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
355 	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
356 	 * it has rmem allocations there is a risk that the user of the
357 	 * socket cannot make forward progress due to exceeding the rmem
358 	 * limits. By rights, sk_clear_memalloc() should only be called
359 	 * on sockets being torn down but warn and reset the accounting if
360 	 * that assumption breaks.
361 	 */
362 	if (WARN_ON(sk->sk_forward_alloc))
363 		sk_mem_reclaim(sk);
364 }
365 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
366 
367 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
368 {
369 	int ret;
370 	unsigned long pflags = current->flags;
371 
372 	/* these should have been dropped before queueing */
373 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
374 
375 	current->flags |= PF_MEMALLOC;
376 	ret = sk->sk_backlog_rcv(sk, skb);
377 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
378 
379 	return ret;
380 }
381 EXPORT_SYMBOL(__sk_backlog_rcv);
382 
383 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
384 {
385 	struct timeval tv;
386 
387 	if (optlen < sizeof(tv))
388 		return -EINVAL;
389 	if (copy_from_user(&tv, optval, sizeof(tv)))
390 		return -EFAULT;
391 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
392 		return -EDOM;
393 
394 	if (tv.tv_sec < 0) {
395 		static int warned __read_mostly;
396 
397 		*timeo_p = 0;
398 		if (warned < 10 && net_ratelimit()) {
399 			warned++;
400 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
401 				__func__, current->comm, task_pid_nr(current));
402 		}
403 		return 0;
404 	}
405 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
406 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
407 		return 0;
408 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
409 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
410 	return 0;
411 }
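/*
 * For reference, the userspace side of sock_set_timeout() above is a
 * plain setsockopt() with a struct timeval (minimal sketch, error
 * handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A tv_usec outside [0, USEC_PER_SEC) fails with EDOM, a negative
 * tv_sec sets a zero timeout (with the ratelimited warning above), and
 * {0, 0} selects MAX_SCHEDULE_TIMEOUT, i.e. block forever.
 */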
412 
413 static void sock_warn_obsolete_bsdism(const char *name)
414 {
415 	static int warned;
416 	static char warncomm[TASK_COMM_LEN];
417 	if (strcmp(warncomm, current->comm) && warned < 5) {
418 		strcpy(warncomm,  current->comm);
419 		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
420 			warncomm, name);
421 		warned++;
422 	}
423 }
424 
425 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
426 
427 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
428 {
429 	if (sk->sk_flags & flags) {
430 		sk->sk_flags &= ~flags;
431 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
432 			net_disable_timestamp();
433 	}
434 }
435 
436 
437 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
438 {
439 	int err;
440 	int skb_len;
441 	unsigned long flags;
442 	struct sk_buff_head *list = &sk->sk_receive_queue;
443 
444 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
445 		atomic_inc(&sk->sk_drops);
446 		trace_sock_rcvqueue_full(sk, skb);
447 		return -ENOMEM;
448 	}
449 
450 	err = sk_filter(sk, skb);
451 	if (err)
452 		return err;
453 
454 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
455 		atomic_inc(&sk->sk_drops);
456 		return -ENOBUFS;
457 	}
458 
459 	skb->dev = NULL;
460 	skb_set_owner_r(skb, sk);
461 
462 	/* Cache the SKB length before we tack it onto the receive
463 	 * queue.  Once it is added it no longer belongs to us and
464 	 * may be freed by other threads of control pulling packets
465 	 * from the queue.
466 	 */
467 	skb_len = skb->len;
468 
469 	/* We escape from the RCU-protected region, so make sure we don't
470 	 * leak a non-refcounted dst
471 	 */
472 	skb_dst_force(skb);
473 
474 	spin_lock_irqsave(&list->lock, flags);
475 	skb->dropcount = atomic_read(&sk->sk_drops);
476 	__skb_queue_tail(list, skb);
477 	spin_unlock_irqrestore(&list->lock, flags);
478 
479 	if (!sock_flag(sk, SOCK_DEAD))
480 		sk->sk_data_ready(sk);
481 	return 0;
482 }
483 EXPORT_SYMBOL(sock_queue_rcv_skb);
484 
485 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
486 {
487 	int rc = NET_RX_SUCCESS;
488 
489 	if (sk_filter(sk, skb))
490 		goto discard_and_relse;
491 
492 	skb->dev = NULL;
493 
494 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
495 		atomic_inc(&sk->sk_drops);
496 		goto discard_and_relse;
497 	}
498 	if (nested)
499 		bh_lock_sock_nested(sk);
500 	else
501 		bh_lock_sock(sk);
502 	if (!sock_owned_by_user(sk)) {
503 		/*
504 		 * trylock + unlock semantics:
505 		 */
506 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
507 
508 		rc = sk_backlog_rcv(sk, skb);
509 
510 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
511 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
512 		bh_unlock_sock(sk);
513 		atomic_inc(&sk->sk_drops);
514 		goto discard_and_relse;
515 	}
516 
517 	bh_unlock_sock(sk);
518 out:
519 	sock_put(sk);
520 	return rc;
521 discard_and_relse:
522 	kfree_skb(skb);
523 	goto out;
524 }
525 EXPORT_SYMBOL(sk_receive_skb);
526 
527 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
528 {
529 	struct dst_entry *dst = __sk_dst_get(sk);
530 
531 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
532 		sk_tx_queue_clear(sk);
533 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
534 		dst_release(dst);
535 		return NULL;
536 	}
537 
538 	return dst;
539 }
540 EXPORT_SYMBOL(__sk_dst_check);
541 
542 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
543 {
544 	struct dst_entry *dst = sk_dst_get(sk);
545 
546 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
547 		sk_dst_reset(sk);
548 		dst_release(dst);
549 		return NULL;
550 	}
551 
552 	return dst;
553 }
554 EXPORT_SYMBOL(sk_dst_check);
555 
556 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
557 				int optlen)
558 {
559 	int ret = -ENOPROTOOPT;
560 #ifdef CONFIG_NETDEVICES
561 	struct net *net = sock_net(sk);
562 	char devname[IFNAMSIZ];
563 	int index;
564 
565 	/* Sorry... */
566 	ret = -EPERM;
567 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
568 		goto out;
569 
570 	ret = -EINVAL;
571 	if (optlen < 0)
572 		goto out;
573 
574 	/* Bind this socket to a particular device like "eth0",
575 	 * as specified in the passed interface name. If the
576 	 * name is "" or the option length is zero the socket
577 	 * is not bound.
578 	 */
579 	if (optlen > IFNAMSIZ - 1)
580 		optlen = IFNAMSIZ - 1;
581 	memset(devname, 0, sizeof(devname));
582 
583 	ret = -EFAULT;
584 	if (copy_from_user(devname, optval, optlen))
585 		goto out;
586 
587 	index = 0;
588 	if (devname[0] != '\0') {
589 		struct net_device *dev;
590 
591 		rcu_read_lock();
592 		dev = dev_get_by_name_rcu(net, devname);
593 		if (dev)
594 			index = dev->ifindex;
595 		rcu_read_unlock();
596 		ret = -ENODEV;
597 		if (!dev)
598 			goto out;
599 	}
600 
601 	lock_sock(sk);
602 	sk->sk_bound_dev_if = index;
603 	sk_dst_reset(sk);
604 	release_sock(sk);
605 
606 	ret = 0;
607 
608 out:
609 #endif
610 
611 	return ret;
612 }
613 
614 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
615 				int __user *optlen, int len)
616 {
617 	int ret = -ENOPROTOOPT;
618 #ifdef CONFIG_NETDEVICES
619 	struct net *net = sock_net(sk);
620 	char devname[IFNAMSIZ];
621 
622 	if (sk->sk_bound_dev_if == 0) {
623 		len = 0;
624 		goto zero;
625 	}
626 
627 	ret = -EINVAL;
628 	if (len < IFNAMSIZ)
629 		goto out;
630 
631 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
632 	if (ret)
633 		goto out;
634 
635 	len = strlen(devname) + 1;
636 
637 	ret = -EFAULT;
638 	if (copy_to_user(optval, devname, len))
639 		goto out;
640 
641 zero:
642 	ret = -EFAULT;
643 	if (put_user(len, optlen))
644 		goto out;
645 
646 	ret = 0;
647 
648 out:
649 #endif
650 
651 	return ret;
652 }
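/*
 * Userspace view of the two SO_BINDTODEVICE helpers above (sketch,
 * assuming the caller has CAP_NET_RAW; error handling omitted):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	char ifname[IFNAMSIZ] = "eth0";
 *	socklen_t len = sizeof(ifname);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, strlen(ifname));
 *	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, &len);
 *
 * An empty name (or zero option length) unbinds the socket; reading the
 * name back needs a buffer of at least IFNAMSIZ bytes, else getsockopt()
 * fails with EINVAL.
 */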
653 
654 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
655 {
656 	if (valbool)
657 		sock_set_flag(sk, bit);
658 	else
659 		sock_reset_flag(sk, bit);
660 }
661 
662 /*
663  *	This is meant for all protocols to use and covers goings on
664  *	at the socket level. Everything here is generic.
665  */
666 
667 int sock_setsockopt(struct socket *sock, int level, int optname,
668 		    char __user *optval, unsigned int optlen)
669 {
670 	struct sock *sk = sock->sk;
671 	int val;
672 	int valbool;
673 	struct linger ling;
674 	int ret = 0;
675 
676 	/*
677 	 *	Options without arguments
678 	 */
679 
680 	if (optname == SO_BINDTODEVICE)
681 		return sock_setbindtodevice(sk, optval, optlen);
682 
683 	if (optlen < sizeof(int))
684 		return -EINVAL;
685 
686 	if (get_user(val, (int __user *)optval))
687 		return -EFAULT;
688 
689 	valbool = val ? 1 : 0;
690 
691 	lock_sock(sk);
692 
693 	switch (optname) {
694 	case SO_DEBUG:
695 		if (val && !capable(CAP_NET_ADMIN))
696 			ret = -EACCES;
697 		else
698 			sock_valbool_flag(sk, SOCK_DBG, valbool);
699 		break;
700 	case SO_REUSEADDR:
701 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
702 		break;
703 	case SO_REUSEPORT:
704 		sk->sk_reuseport = valbool;
705 		break;
706 	case SO_TYPE:
707 	case SO_PROTOCOL:
708 	case SO_DOMAIN:
709 	case SO_ERROR:
710 		ret = -ENOPROTOOPT;
711 		break;
712 	case SO_DONTROUTE:
713 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
714 		break;
715 	case SO_BROADCAST:
716 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
717 		break;
718 	case SO_SNDBUF:
719 		/* Don't return an error on this; BSD doesn't, and if you
720 		 * think about it, this is right. Otherwise apps have to
721 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
722 		 * are treated in BSD as hints.
723 		 */
724 		val = min_t(u32, val, sysctl_wmem_max);
725 set_sndbuf:
726 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
727 		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
728 		/* Wake up sending tasks if we upped the value. */
729 		sk->sk_write_space(sk);
730 		break;
731 
732 	case SO_SNDBUFFORCE:
733 		if (!capable(CAP_NET_ADMIN)) {
734 			ret = -EPERM;
735 			break;
736 		}
737 		goto set_sndbuf;
738 
739 	case SO_RCVBUF:
740 		/* Don't return an error on this; BSD doesn't, and if you
741 		 * think about it, this is right. Otherwise apps have to
742 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
743 		 * are treated in BSD as hints.
744 		 */
745 		val = min_t(u32, val, sysctl_rmem_max);
746 set_rcvbuf:
747 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
748 		/*
749 		 * We double it on the way in to account for
750 		 * "struct sk_buff" etc. overhead.   Applications
751 		 * assume that the SO_RCVBUF setting they make will
752 		 * allow that much actual data to be received on that
753 		 * socket.
754 		 *
755 		 * Applications are unaware that "struct sk_buff" and
756 		 * other overheads allocate from the receive buffer
757 		 * during socket buffer allocation.
758 		 *
759 		 * And after considering the possible alternatives,
760 		 * returning the value we actually used in getsockopt
761 		 * is the most desirable behavior.
762 		 */
763 		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
764 		break;
765 
766 	case SO_RCVBUFFORCE:
767 		if (!capable(CAP_NET_ADMIN)) {
768 			ret = -EPERM;
769 			break;
770 		}
771 		goto set_rcvbuf;
772 
773 	case SO_KEEPALIVE:
774 #ifdef CONFIG_INET
775 		if (sk->sk_protocol == IPPROTO_TCP &&
776 		    sk->sk_type == SOCK_STREAM)
777 			tcp_set_keepalive(sk, valbool);
778 #endif
779 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
780 		break;
781 
782 	case SO_OOBINLINE:
783 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
784 		break;
785 
786 	case SO_NO_CHECK:
787 		sk->sk_no_check_tx = valbool;
788 		break;
789 
790 	case SO_PRIORITY:
791 		if ((val >= 0 && val <= 6) ||
792 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
793 			sk->sk_priority = val;
794 		else
795 			ret = -EPERM;
796 		break;
797 
798 	case SO_LINGER:
799 		if (optlen < sizeof(ling)) {
800 			ret = -EINVAL;	/* 1003.1g */
801 			break;
802 		}
803 		if (copy_from_user(&ling, optval, sizeof(ling))) {
804 			ret = -EFAULT;
805 			break;
806 		}
807 		if (!ling.l_onoff)
808 			sock_reset_flag(sk, SOCK_LINGER);
809 		else {
810 #if (BITS_PER_LONG == 32)
811 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
812 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
813 			else
814 #endif
815 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
816 			sock_set_flag(sk, SOCK_LINGER);
817 		}
818 		break;
819 
820 	case SO_BSDCOMPAT:
821 		sock_warn_obsolete_bsdism("setsockopt");
822 		break;
823 
824 	case SO_PASSCRED:
825 		if (valbool)
826 			set_bit(SOCK_PASSCRED, &sock->flags);
827 		else
828 			clear_bit(SOCK_PASSCRED, &sock->flags);
829 		break;
830 
831 	case SO_TIMESTAMP:
832 	case SO_TIMESTAMPNS:
833 		if (valbool)  {
834 			if (optname == SO_TIMESTAMP)
835 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
836 			else
837 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
838 			sock_set_flag(sk, SOCK_RCVTSTAMP);
839 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
840 		} else {
841 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
842 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
843 		}
844 		break;
845 
846 	case SO_TIMESTAMPING:
847 		if (val & ~SOF_TIMESTAMPING_MASK) {
848 			ret = -EINVAL;
849 			break;
850 		}
851 		if (val & SOF_TIMESTAMPING_OPT_ID &&
852 		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
853 			if (sk->sk_protocol == IPPROTO_TCP) {
854 				if (sk->sk_state != TCP_ESTABLISHED) {
855 					ret = -EINVAL;
856 					break;
857 				}
858 				sk->sk_tskey = tcp_sk(sk)->snd_una;
859 			} else {
860 				sk->sk_tskey = 0;
861 			}
862 		}
863 		sk->sk_tsflags = val;
864 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
865 			sock_enable_timestamp(sk,
866 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
867 		else
868 			sock_disable_timestamp(sk,
869 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
870 		break;
871 
872 	case SO_RCVLOWAT:
873 		if (val < 0)
874 			val = INT_MAX;
875 		sk->sk_rcvlowat = val ? : 1;
876 		break;
877 
878 	case SO_RCVTIMEO:
879 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
880 		break;
881 
882 	case SO_SNDTIMEO:
883 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
884 		break;
885 
886 	case SO_ATTACH_FILTER:
887 		ret = -EINVAL;
888 		if (optlen == sizeof(struct sock_fprog)) {
889 			struct sock_fprog fprog;
890 
891 			ret = -EFAULT;
892 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
893 				break;
894 
895 			ret = sk_attach_filter(&fprog, sk);
896 		}
897 		break;
898 
899 	case SO_DETACH_FILTER:
900 		ret = sk_detach_filter(sk);
901 		break;
902 
903 	case SO_LOCK_FILTER:
904 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
905 			ret = -EPERM;
906 		else
907 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
908 		break;
909 
910 	case SO_PASSSEC:
911 		if (valbool)
912 			set_bit(SOCK_PASSSEC, &sock->flags);
913 		else
914 			clear_bit(SOCK_PASSSEC, &sock->flags);
915 		break;
916 	case SO_MARK:
917 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
918 			ret = -EPERM;
919 		else
920 			sk->sk_mark = val;
921 		break;
922 
923 		/* We implement SO_SNDLOWAT etc. as not settable
924 		   (1003.1g 5.3). */
925 	case SO_RXQ_OVFL:
926 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
927 		break;
928 
929 	case SO_WIFI_STATUS:
930 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
931 		break;
932 
933 	case SO_PEEK_OFF:
934 		if (sock->ops->set_peek_off)
935 			ret = sock->ops->set_peek_off(sk, val);
936 		else
937 			ret = -EOPNOTSUPP;
938 		break;
939 
940 	case SO_NOFCS:
941 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
942 		break;
943 
944 	case SO_SELECT_ERR_QUEUE:
945 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
946 		break;
947 
948 #ifdef CONFIG_NET_RX_BUSY_POLL
949 	case SO_BUSY_POLL:
950 		/* allow unprivileged users to decrease the value */
951 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
952 			ret = -EPERM;
953 		else {
954 			if (val < 0)
955 				ret = -EINVAL;
956 			else
957 				sk->sk_ll_usec = val;
958 		}
959 		break;
960 #endif
961 
962 	case SO_MAX_PACING_RATE:
963 		sk->sk_max_pacing_rate = val;
964 		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
965 					 sk->sk_max_pacing_rate);
966 		break;
967 
968 	default:
969 		ret = -ENOPROTOOPT;
970 		break;
971 	}
972 	release_sock(sk);
973 	return ret;
974 }
975 EXPORT_SYMBOL(sock_setsockopt);
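/*
 * The val * 2 stores in the SO_SNDBUF/SO_RCVBUF cases above are visible
 * from userspace; a minimal sketch (assuming 65536 is within
 * sysctl_rmem_max):
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * out now reads back as 131072: the kernel doubled the value to cover
 * struct sk_buff overhead, and getsockopt() returns what was actually
 * used rather than what was requested.
 */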
976 
977 
978 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
979 			  struct ucred *ucred)
980 {
981 	ucred->pid = pid_vnr(pid);
982 	ucred->uid = ucred->gid = -1;
983 	if (cred) {
984 		struct user_namespace *current_ns = current_user_ns();
985 
986 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
987 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
988 	}
989 }
990 
991 int sock_getsockopt(struct socket *sock, int level, int optname,
992 		    char __user *optval, int __user *optlen)
993 {
994 	struct sock *sk = sock->sk;
995 
996 	union {
997 		int val;
998 		struct linger ling;
999 		struct timeval tm;
1000 	} v;
1001 
1002 	int lv = sizeof(int);
1003 	int len;
1004 
1005 	if (get_user(len, optlen))
1006 		return -EFAULT;
1007 	if (len < 0)
1008 		return -EINVAL;
1009 
1010 	memset(&v, 0, sizeof(v));
1011 
1012 	switch (optname) {
1013 	case SO_DEBUG:
1014 		v.val = sock_flag(sk, SOCK_DBG);
1015 		break;
1016 
1017 	case SO_DONTROUTE:
1018 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1019 		break;
1020 
1021 	case SO_BROADCAST:
1022 		v.val = sock_flag(sk, SOCK_BROADCAST);
1023 		break;
1024 
1025 	case SO_SNDBUF:
1026 		v.val = sk->sk_sndbuf;
1027 		break;
1028 
1029 	case SO_RCVBUF:
1030 		v.val = sk->sk_rcvbuf;
1031 		break;
1032 
1033 	case SO_REUSEADDR:
1034 		v.val = sk->sk_reuse;
1035 		break;
1036 
1037 	case SO_REUSEPORT:
1038 		v.val = sk->sk_reuseport;
1039 		break;
1040 
1041 	case SO_KEEPALIVE:
1042 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1043 		break;
1044 
1045 	case SO_TYPE:
1046 		v.val = sk->sk_type;
1047 		break;
1048 
1049 	case SO_PROTOCOL:
1050 		v.val = sk->sk_protocol;
1051 		break;
1052 
1053 	case SO_DOMAIN:
1054 		v.val = sk->sk_family;
1055 		break;
1056 
1057 	case SO_ERROR:
1058 		v.val = -sock_error(sk);
1059 		if (v.val == 0)
1060 			v.val = xchg(&sk->sk_err_soft, 0);
1061 		break;
1062 
1063 	case SO_OOBINLINE:
1064 		v.val = sock_flag(sk, SOCK_URGINLINE);
1065 		break;
1066 
1067 	case SO_NO_CHECK:
1068 		v.val = sk->sk_no_check_tx;
1069 		break;
1070 
1071 	case SO_PRIORITY:
1072 		v.val = sk->sk_priority;
1073 		break;
1074 
1075 	case SO_LINGER:
1076 		lv		= sizeof(v.ling);
1077 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1078 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1079 		break;
1080 
1081 	case SO_BSDCOMPAT:
1082 		sock_warn_obsolete_bsdism("getsockopt");
1083 		break;
1084 
1085 	case SO_TIMESTAMP:
1086 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1087 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1088 		break;
1089 
1090 	case SO_TIMESTAMPNS:
1091 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1092 		break;
1093 
1094 	case SO_TIMESTAMPING:
1095 		v.val = sk->sk_tsflags;
1096 		break;
1097 
1098 	case SO_RCVTIMEO:
1099 		lv = sizeof(struct timeval);
1100 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1101 			v.tm.tv_sec = 0;
1102 			v.tm.tv_usec = 0;
1103 		} else {
1104 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1105 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1106 		}
1107 		break;
1108 
1109 	case SO_SNDTIMEO:
1110 		lv = sizeof(struct timeval);
1111 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1112 			v.tm.tv_sec = 0;
1113 			v.tm.tv_usec = 0;
1114 		} else {
1115 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1116 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1117 		}
1118 		break;
1119 
1120 	case SO_RCVLOWAT:
1121 		v.val = sk->sk_rcvlowat;
1122 		break;
1123 
1124 	case SO_SNDLOWAT:
1125 		v.val = 1;
1126 		break;
1127 
1128 	case SO_PASSCRED:
1129 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1130 		break;
1131 
1132 	case SO_PEERCRED:
1133 	{
1134 		struct ucred peercred;
1135 		if (len > sizeof(peercred))
1136 			len = sizeof(peercred);
1137 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1138 		if (copy_to_user(optval, &peercred, len))
1139 			return -EFAULT;
1140 		goto lenout;
1141 	}
1142 
1143 	case SO_PEERNAME:
1144 	{
1145 		char address[128];
1146 
1147 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1148 			return -ENOTCONN;
1149 		if (lv < len)
1150 			return -EINVAL;
1151 		if (copy_to_user(optval, address, len))
1152 			return -EFAULT;
1153 		goto lenout;
1154 	}
1155 
1156 	/* Dubious BSD thing... Probably nobody even uses it, but
1157 	 * the UNIX standard wants it for whatever reason... -DaveM
1158 	 */
1159 	case SO_ACCEPTCONN:
1160 		v.val = sk->sk_state == TCP_LISTEN;
1161 		break;
1162 
1163 	case SO_PASSSEC:
1164 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1165 		break;
1166 
1167 	case SO_PEERSEC:
1168 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1169 
1170 	case SO_MARK:
1171 		v.val = sk->sk_mark;
1172 		break;
1173 
1174 	case SO_RXQ_OVFL:
1175 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1176 		break;
1177 
1178 	case SO_WIFI_STATUS:
1179 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1180 		break;
1181 
1182 	case SO_PEEK_OFF:
1183 		if (!sock->ops->set_peek_off)
1184 			return -EOPNOTSUPP;
1185 
1186 		v.val = sk->sk_peek_off;
1187 		break;
1188 	case SO_NOFCS:
1189 		v.val = sock_flag(sk, SOCK_NOFCS);
1190 		break;
1191 
1192 	case SO_BINDTODEVICE:
1193 		return sock_getbindtodevice(sk, optval, optlen, len);
1194 
1195 	case SO_GET_FILTER:
1196 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1197 		if (len < 0)
1198 			return len;
1199 
1200 		goto lenout;
1201 
1202 	case SO_LOCK_FILTER:
1203 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1204 		break;
1205 
1206 	case SO_BPF_EXTENSIONS:
1207 		v.val = bpf_tell_extensions();
1208 		break;
1209 
1210 	case SO_SELECT_ERR_QUEUE:
1211 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1212 		break;
1213 
1214 #ifdef CONFIG_NET_RX_BUSY_POLL
1215 	case SO_BUSY_POLL:
1216 		v.val = sk->sk_ll_usec;
1217 		break;
1218 #endif
1219 
1220 	case SO_MAX_PACING_RATE:
1221 		v.val = sk->sk_max_pacing_rate;
1222 		break;
1223 
1224 	default:
1225 		return -ENOPROTOOPT;
1226 	}
1227 
1228 	if (len > lv)
1229 		len = lv;
1230 	if (copy_to_user(optval, &v, len))
1231 		return -EFAULT;
1232 lenout:
1233 	if (put_user(len, optlen))
1234 		return -EFAULT;
1235 	return 0;
1236 }
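/*
 * Example for one of the less obvious getsockopt() cases above,
 * SO_PEERCRED on a connected AF_UNIX socket (sketch, error handling
 * omitted):
 *
 *	#include <sys/socket.h>
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len);
 *
 * cr.pid, cr.uid and cr.gid then describe the peer, translated into the
 * caller's pid and user namespaces by cred_to_ucred() above.
 */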
1237 
1238 /*
1239  * Initialize an sk_lock.
1240  *
1241  * (We also register the sk_lock with the lock validator.)
1242  */
1243 static inline void sock_lock_init(struct sock *sk)
1244 {
1245 	sock_lock_init_class_and_name(sk,
1246 			af_family_slock_key_strings[sk->sk_family],
1247 			af_family_slock_keys + sk->sk_family,
1248 			af_family_key_strings[sk->sk_family],
1249 			af_family_keys + sk->sk_family);
1250 }
1251 
1252 /*
1253  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1254  * even temporarily, because of RCU lookups. sk_node should also be left as is.
1255  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1256  */
1257 static void sock_copy(struct sock *nsk, const struct sock *osk)
1258 {
1259 #ifdef CONFIG_SECURITY_NETWORK
1260 	void *sptr = nsk->sk_security;
1261 #endif
1262 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1263 
1264 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1265 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1266 
1267 #ifdef CONFIG_SECURITY_NETWORK
1268 	nsk->sk_security = sptr;
1269 	security_sk_clone(osk, nsk);
1270 #endif
1271 }
1272 
1273 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1274 {
1275 	unsigned long nulls1, nulls2;
1276 
1277 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1278 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1279 	if (nulls1 > nulls2)
1280 		swap(nulls1, nulls2);
1281 
1282 	if (nulls1 != 0)
1283 		memset((char *)sk, 0, nulls1);
1284 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1285 	       nulls2 - nulls1 - sizeof(void *));
1286 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1287 	       size - nulls2 - sizeof(void *));
1288 }
1289 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1290 
1291 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1292 		int family)
1293 {
1294 	struct sock *sk;
1295 	struct kmem_cache *slab;
1296 
1297 	slab = prot->slab;
1298 	if (slab != NULL) {
1299 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1300 		if (!sk)
1301 			return sk;
1302 		if (priority & __GFP_ZERO) {
1303 			if (prot->clear_sk)
1304 				prot->clear_sk(sk, prot->obj_size);
1305 			else
1306 				sk_prot_clear_nulls(sk, prot->obj_size);
1307 		}
1308 	} else
1309 		sk = kmalloc(prot->obj_size, priority);
1310 
1311 	if (sk != NULL) {
1312 		kmemcheck_annotate_bitfield(sk, flags);
1313 
1314 		if (security_sk_alloc(sk, family, priority))
1315 			goto out_free;
1316 
1317 		if (!try_module_get(prot->owner))
1318 			goto out_free_sec;
1319 		sk_tx_queue_clear(sk);
1320 	}
1321 
1322 	return sk;
1323 
1324 out_free_sec:
1325 	security_sk_free(sk);
1326 out_free:
1327 	if (slab != NULL)
1328 		kmem_cache_free(slab, sk);
1329 	else
1330 		kfree(sk);
1331 	return NULL;
1332 }
1333 
1334 static void sk_prot_free(struct proto *prot, struct sock *sk)
1335 {
1336 	struct kmem_cache *slab;
1337 	struct module *owner;
1338 
1339 	owner = prot->owner;
1340 	slab = prot->slab;
1341 
1342 	security_sk_free(sk);
1343 	if (slab != NULL)
1344 		kmem_cache_free(slab, sk);
1345 	else
1346 		kfree(sk);
1347 	module_put(owner);
1348 }
1349 
1350 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1351 void sock_update_netprioidx(struct sock *sk)
1352 {
1353 	if (in_interrupt())
1354 		return;
1355 
1356 	sk->sk_cgrp_prioidx = task_netprioidx(current);
1357 }
1358 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1359 #endif
1360 
1361 /**
1362  *	sk_alloc - All socket objects are allocated here
1363  *	@net: the applicable net namespace
1364  *	@family: protocol family
1365  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1366  *	@prot: struct proto associated with this new sock instance
1367  */
1368 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1369 		      struct proto *prot)
1370 {
1371 	struct sock *sk;
1372 
1373 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1374 	if (sk) {
1375 		sk->sk_family = family;
1376 		/*
1377 		 * See comment in struct sock definition to understand
1378 		 * why we need sk_prot_creator -acme
1379 		 */
1380 		sk->sk_prot = sk->sk_prot_creator = prot;
1381 		sock_lock_init(sk);
1382 		sock_net_set(sk, get_net(net));
1383 		atomic_set(&sk->sk_wmem_alloc, 1);
1384 
1385 		sock_update_classid(sk);
1386 		sock_update_netprioidx(sk);
1387 	}
1388 
1389 	return sk;
1390 }
1391 EXPORT_SYMBOL(sk_alloc);
1392 
1393 static void __sk_free(struct sock *sk)
1394 {
1395 	struct sk_filter *filter;
1396 
1397 	if (sk->sk_destruct)
1398 		sk->sk_destruct(sk);
1399 
1400 	filter = rcu_dereference_check(sk->sk_filter,
1401 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1402 	if (filter) {
1403 		sk_filter_uncharge(sk, filter);
1404 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1405 	}
1406 
1407 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1408 
1409 	if (atomic_read(&sk->sk_omem_alloc))
1410 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
1411 			 __func__, atomic_read(&sk->sk_omem_alloc));
1412 
1413 	if (sk->sk_peer_cred)
1414 		put_cred(sk->sk_peer_cred);
1415 	put_pid(sk->sk_peer_pid);
1416 	put_net(sock_net(sk));
1417 	sk_prot_free(sk->sk_prot_creator, sk);
1418 }
1419 
1420 void sk_free(struct sock *sk)
1421 {
1422 	/*
1423 	 * We subtract one from sk_wmem_alloc, which lets us know whether
1424 	 * some packets are still in some tx queue.
1425 	 * If it is non-zero, sock_wfree() will call __sk_free(sk) later.
1426 	 */
1427 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1428 		__sk_free(sk);
1429 }
1430 EXPORT_SYMBOL(sk_free);
1431 
1432 /*
1433  * The last sock_put should drop the reference to sk->sk_net. It has already
1434  * been dropped in sk_change_net. Taking a reference to a stopping namespace
1435  * is not an option.
1436  * Take a reference to the socket to remove it from the hash while it is
1437  * _alive_, and after that destroy it in the context of init_net.
1438  */
1439 void sk_release_kernel(struct sock *sk)
1440 {
1441 	if (sk == NULL || sk->sk_socket == NULL)
1442 		return;
1443 
1444 	sock_hold(sk);
1445 	sock_release(sk->sk_socket);
1446 	release_net(sock_net(sk));
1447 	sock_net_set(sk, get_net(&init_net));
1448 	sock_put(sk);
1449 }
1450 EXPORT_SYMBOL(sk_release_kernel);
1451 
1452 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1453 {
1454 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1455 		sock_update_memcg(newsk);
1456 }
1457 
1458 /**
1459  *	sk_clone_lock - clone a socket, and lock its clone
1460  *	@sk: the socket to clone
1461  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1462  *
1463  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1464  */
1465 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1466 {
1467 	struct sock *newsk;
1468 	bool is_charged = true;
1469 
1470 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1471 	if (newsk != NULL) {
1472 		struct sk_filter *filter;
1473 
1474 		sock_copy(newsk, sk);
1475 
1476 		/* SANITY */
1477 		get_net(sock_net(newsk));
1478 		sk_node_init(&newsk->sk_node);
1479 		sock_lock_init(newsk);
1480 		bh_lock_sock(newsk);
1481 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1482 		newsk->sk_backlog.len = 0;
1483 
1484 		atomic_set(&newsk->sk_rmem_alloc, 0);
1485 		/*
1486 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1487 		 */
1488 		atomic_set(&newsk->sk_wmem_alloc, 1);
1489 		atomic_set(&newsk->sk_omem_alloc, 0);
1490 		skb_queue_head_init(&newsk->sk_receive_queue);
1491 		skb_queue_head_init(&newsk->sk_write_queue);
1492 #ifdef CONFIG_NET_DMA
1493 		skb_queue_head_init(&newsk->sk_async_wait_queue);
1494 #endif
1495 
1496 		spin_lock_init(&newsk->sk_dst_lock);
1497 		rwlock_init(&newsk->sk_callback_lock);
1498 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1499 				af_callback_keys + newsk->sk_family,
1500 				af_family_clock_key_strings[newsk->sk_family]);
1501 
1502 		newsk->sk_dst_cache	= NULL;
1503 		newsk->sk_wmem_queued	= 0;
1504 		newsk->sk_forward_alloc = 0;
1505 		newsk->sk_send_head	= NULL;
1506 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1507 
1508 		sock_reset_flag(newsk, SOCK_DONE);
1509 		skb_queue_head_init(&newsk->sk_error_queue);
1510 
1511 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1512 		if (filter != NULL)
1513 			/* though it's an empty new sock, the charging may fail
1514 			 * if sysctl_optmem_max was changed between creation of
1515 			 * the original socket and cloning
1516 			 */
1517 			is_charged = sk_filter_charge(newsk, filter);
1518 
1519 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
1520 			/* It is still a raw copy of the parent, so invalidate
1521 			 * its destructor and do a plain sk_free() */
1522 			newsk->sk_destruct = NULL;
1523 			bh_unlock_sock(newsk);
1524 			sk_free(newsk);
1525 			newsk = NULL;
1526 			goto out;
1527 		}
1528 
1529 		newsk->sk_err	   = 0;
1530 		newsk->sk_priority = 0;
1531 		/*
1532 		 * Before updating sk_refcnt, we must commit prior changes to memory
1533 		 * (Documentation/RCU/rculist_nulls.txt for details)
1534 		 */
1535 		smp_wmb();
1536 		atomic_set(&newsk->sk_refcnt, 2);
1537 
1538 		/*
1539 		 * Increment the counter in the same struct proto as the master
1540 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1541 		 * is the same as sk->sk_prot->socks, as this field was copied
1542 		 * with memcpy).
1543 		 *
1544 		 * This _changes_ the previous behaviour, where
1545 		 * tcp_create_openreq_child was always incrementing the
1546 		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1547 		 * to be taken into account in all callers. -acme
1548 		 */
1549 		sk_refcnt_debug_inc(newsk);
1550 		sk_set_socket(newsk, NULL);
1551 		newsk->sk_wq = NULL;
1552 
1553 		sk_update_clone(sk, newsk);
1554 
1555 		if (newsk->sk_prot->sockets_allocated)
1556 			sk_sockets_allocated_inc(newsk);
1557 
1558 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1559 			net_enable_timestamp();
1560 	}
1561 out:
1562 	return newsk;
1563 }
1564 EXPORT_SYMBOL_GPL(sk_clone_lock);
1565 
1566 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1567 {
1568 	__sk_dst_set(sk, dst);
1569 	sk->sk_route_caps = dst->dev->features;
1570 	if (sk->sk_route_caps & NETIF_F_GSO)
1571 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1572 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1573 	if (sk_can_gso(sk)) {
1574 		if (dst->header_len) {
1575 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1576 		} else {
1577 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1578 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1579 			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1580 		}
1581 	}
1582 }
1583 EXPORT_SYMBOL_GPL(sk_setup_caps);
1584 
1585 /*
1586  *	Simple resource managers for sockets.
1587  */
1588 
1589 
1590 /*
1591  * Write buffer destructor automatically called from kfree_skb.
1592  */
1593 void sock_wfree(struct sk_buff *skb)
1594 {
1595 	struct sock *sk = skb->sk;
1596 	unsigned int len = skb->truesize;
1597 
1598 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1599 		/*
1600 		 * Keep a reference on sk_wmem_alloc; it will be released
1601 		 * after the sk_write_space() call.
1602 		 */
1603 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1604 		sk->sk_write_space(sk);
1605 		len = 1;
1606 	}
1607 	/*
1608 	 * If sk_wmem_alloc reaches 0, we must finish what sk_free()
1609 	 * could not do because of in-flight packets.
1610 	 */
1611 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1612 		__sk_free(sk);
1613 }
1614 EXPORT_SYMBOL(sock_wfree);
1615 
1616 void skb_orphan_partial(struct sk_buff *skb)
1617 {
1618 	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1619 	 * so we do not completely orphan the skb, but transfer all
1620 	 * accounted bytes but one, to avoid unexpected reordering.
1621 	 */
1622 	if (skb->destructor == sock_wfree
1623 #ifdef CONFIG_INET
1624 	    || skb->destructor == tcp_wfree
1625 #endif
1626 		) {
1627 		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1628 		skb->truesize = 1;
1629 	} else {
1630 		skb_orphan(skb);
1631 	}
1632 }
1633 EXPORT_SYMBOL(skb_orphan_partial);
1634 
1635 /*
1636  * Read buffer destructor automatically called from kfree_skb.
1637  */
1638 void sock_rfree(struct sk_buff *skb)
1639 {
1640 	struct sock *sk = skb->sk;
1641 	unsigned int len = skb->truesize;
1642 
1643 	atomic_sub(len, &sk->sk_rmem_alloc);
1644 	sk_mem_uncharge(sk, len);
1645 }
1646 EXPORT_SYMBOL(sock_rfree);
1647 
1648 void sock_edemux(struct sk_buff *skb)
1649 {
1650 	struct sock *sk = skb->sk;
1651 
1652 #ifdef CONFIG_INET
1653 	if (sk->sk_state == TCP_TIME_WAIT)
1654 		inet_twsk_put(inet_twsk(sk));
1655 	else
1656 #endif
1657 		sock_put(sk);
1658 }
1659 EXPORT_SYMBOL(sock_edemux);
1660 
1661 kuid_t sock_i_uid(struct sock *sk)
1662 {
1663 	kuid_t uid;
1664 
1665 	read_lock_bh(&sk->sk_callback_lock);
1666 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1667 	read_unlock_bh(&sk->sk_callback_lock);
1668 	return uid;
1669 }
1670 EXPORT_SYMBOL(sock_i_uid);
1671 
1672 unsigned long sock_i_ino(struct sock *sk)
1673 {
1674 	unsigned long ino;
1675 
1676 	read_lock_bh(&sk->sk_callback_lock);
1677 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1678 	read_unlock_bh(&sk->sk_callback_lock);
1679 	return ino;
1680 }
1681 EXPORT_SYMBOL(sock_i_ino);
1682 
1683 /*
1684  * Allocate an skb from the socket's send buffer.
1685  */
1686 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1687 			     gfp_t priority)
1688 {
1689 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1690 		struct sk_buff *skb = alloc_skb(size, priority);
1691 		if (skb) {
1692 			skb_set_owner_w(skb, sk);
1693 			return skb;
1694 		}
1695 	}
1696 	return NULL;
1697 }
1698 EXPORT_SYMBOL(sock_wmalloc);
1699 
1700 /*
1701  * Allocate a memory block from the socket's option memory buffer.
1702  */
1703 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1704 {
1705 	if ((unsigned int)size <= sysctl_optmem_max &&
1706 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1707 		void *mem;
1708 		/* First do the add, to avoid a race in case kmalloc
1709 		 * sleeps.
1710 		 */
1711 		atomic_add(size, &sk->sk_omem_alloc);
1712 		mem = kmalloc(size, priority);
1713 		if (mem)
1714 			return mem;
1715 		atomic_sub(size, &sk->sk_omem_alloc);
1716 	}
1717 	return NULL;
1718 }
1719 EXPORT_SYMBOL(sock_kmalloc);
1720 
1721 /*
1722  * Free an option memory block.
1723  */
1724 void sock_kfree_s(struct sock *sk, void *mem, int size)
1725 {
1726 	kfree(mem);
1727 	atomic_sub(size, &sk->sk_omem_alloc);
1728 }
1729 EXPORT_SYMBOL(sock_kfree_s);
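/*
 * Typical in-kernel usage of the optmem pair above (a sketch of the
 * pattern, not a quote from any particular protocol):
 *
 *	void *buf;
 *
 *	buf = sock_kmalloc(sk, size, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, buf, size);
 *
 * The allocation is charged against sk->sk_omem_alloc and refused once
 * the socket would exceed sysctl_optmem_max, so per-socket option data
 * cannot grow without bound.
 */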
1730 
1731 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1732    I think these locks should be removed for datagram sockets.
1733  */
1734 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1735 {
1736 	DEFINE_WAIT(wait);
1737 
1738 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1739 	for (;;) {
1740 		if (!timeo)
1741 			break;
1742 		if (signal_pending(current))
1743 			break;
1744 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1745 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1746 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1747 			break;
1748 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1749 			break;
1750 		if (sk->sk_err)
1751 			break;
1752 		timeo = schedule_timeout(timeo);
1753 	}
1754 	finish_wait(sk_sleep(sk), &wait);
1755 	return timeo;
1756 }
1757 
1758 
1759 /*
1760  *	Generic send/receive buffer handlers
1761  */
1762 
1763 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1764 				     unsigned long data_len, int noblock,
1765 				     int *errcode, int max_page_order)
1766 {
1767 	struct sk_buff *skb = NULL;
1768 	unsigned long chunk;
1769 	gfp_t gfp_mask;
1770 	long timeo;
1771 	int err;
1772 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1773 	struct page *page;
1774 	int i;
1775 
1776 	err = -EMSGSIZE;
1777 	if (npages > MAX_SKB_FRAGS)
1778 		goto failure;
1779 
1780 	timeo = sock_sndtimeo(sk, noblock);
1781 	while (!skb) {
1782 		err = sock_error(sk);
1783 		if (err != 0)
1784 			goto failure;
1785 
1786 		err = -EPIPE;
1787 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1788 			goto failure;
1789 
1790 		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
1791 			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1792 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1793 			err = -EAGAIN;
1794 			if (!timeo)
1795 				goto failure;
1796 			if (signal_pending(current))
1797 				goto interrupted;
1798 			timeo = sock_wait_for_wmem(sk, timeo);
1799 			continue;
1800 		}
1801 
1802 		err = -ENOBUFS;
1803 		gfp_mask = sk->sk_allocation;
1804 		if (gfp_mask & __GFP_WAIT)
1805 			gfp_mask |= __GFP_REPEAT;
1806 
1807 		skb = alloc_skb(header_len, gfp_mask);
1808 		if (!skb)
1809 			goto failure;
1810 
1811 		skb->truesize += data_len;
1812 
1813 		for (i = 0; npages > 0; i++) {
1814 			int order = max_page_order;
1815 
1816 			while (order) {
1817 				if (npages >= 1 << order) {
1818 					page = alloc_pages(sk->sk_allocation |
1819 							   __GFP_COMP |
1820 							   __GFP_NOWARN |
1821 							   __GFP_NORETRY,
1822 							   order);
1823 					if (page)
1824 						goto fill_page;
1825 				}
1826 				order--;
1827 			}
1828 			page = alloc_page(sk->sk_allocation);
1829 			if (!page)
1830 				goto failure;
1831 fill_page:
1832 			chunk = min_t(unsigned long, data_len,
1833 				      PAGE_SIZE << order);
1834 			skb_fill_page_desc(skb, i, page, 0, chunk);
1835 			data_len -= chunk;
1836 			npages -= 1 << order;
1837 		}
1838 	}
1839 
1840 	skb_set_owner_w(skb, sk);
1841 	return skb;
1842 
1843 interrupted:
1844 	err = sock_intr_errno(timeo);
1845 failure:
1846 	kfree_skb(skb);
1847 	*errcode = err;
1848 	return NULL;
1849 }
1850 EXPORT_SYMBOL(sock_alloc_send_pskb);
1851 
1852 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1853 				    int noblock, int *errcode)
1854 {
1855 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1856 }
1857 EXPORT_SYMBOL(sock_alloc_send_skb);
1858 
1859 /* On 32bit arches, an skb frag is limited to 2^15 */
1860 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1861 
1862 /**
1863  * skb_page_frag_refill - check that a page_frag contains enough room
1864  * @sz: minimum size of the fragment we want to get
1865  * @pfrag: pointer to page_frag
1866  * @prio: priority for memory allocation
1867  *
1868  * Note: While this allocator tries to use high order pages, there is
1869  * no guarantee that allocations succeed. Therefore, @sz MUST be
1870  * less than or equal to PAGE_SIZE.
1871  */
1872 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1873 {
1874 	int order;
1875 
1876 	if (pfrag->page) {
1877 		if (atomic_read(&pfrag->page->_count) == 1) {
1878 			pfrag->offset = 0;
1879 			return true;
1880 		}
1881 		if (pfrag->offset + sz <= pfrag->size)
1882 			return true;
1883 		put_page(pfrag->page);
1884 	}
1885 
1886 	order = SKB_FRAG_PAGE_ORDER;
1887 	do {
1888 		gfp_t gfp = prio;
1889 
1890 		if (order)
1891 			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1892 		pfrag->page = alloc_pages(gfp, order);
1893 		if (likely(pfrag->page)) {
1894 			pfrag->offset = 0;
1895 			pfrag->size = PAGE_SIZE << order;
1896 			return true;
1897 		}
1898 	} while (--order >= 0);
1899 
1900 	return false;
1901 }
1902 EXPORT_SYMBOL(skb_page_frag_refill);
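/*
 * Usage sketch for the refill helper above (the shape of the loop a
 * sendmsg() implementation might use; names here are illustrative):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	...copy user data into pfrag->page at pfrag->offset, attach the
 *	fragment to the skb, then advance pfrag->offset by the amount
 *	consumed.
 */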
1903 
1904 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1905 {
1906 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1907 		return true;
1908 
1909 	sk_enter_memory_pressure(sk);
1910 	sk_stream_moderate_sndbuf(sk);
1911 	return false;
1912 }
1913 EXPORT_SYMBOL(sk_page_frag_refill);
1914 
1915 static void __lock_sock(struct sock *sk)
1916 	__releases(&sk->sk_lock.slock)
1917 	__acquires(&sk->sk_lock.slock)
1918 {
1919 	DEFINE_WAIT(wait);
1920 
1921 	for (;;) {
1922 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1923 					TASK_UNINTERRUPTIBLE);
1924 		spin_unlock_bh(&sk->sk_lock.slock);
1925 		schedule();
1926 		spin_lock_bh(&sk->sk_lock.slock);
1927 		if (!sock_owned_by_user(sk))
1928 			break;
1929 	}
1930 	finish_wait(&sk->sk_lock.wq, &wait);
1931 }
1932 
1933 static void __release_sock(struct sock *sk)
1934 	__releases(&sk->sk_lock.slock)
1935 	__acquires(&sk->sk_lock.slock)
1936 {
1937 	struct sk_buff *skb = sk->sk_backlog.head;
1938 
1939 	do {
1940 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1941 		bh_unlock_sock(sk);
1942 
1943 		do {
1944 			struct sk_buff *next = skb->next;
1945 
1946 			prefetch(next);
1947 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1948 			skb->next = NULL;
1949 			sk_backlog_rcv(sk, skb);
1950 
1951 			/*
1952 			 * We are in process context here with softirqs
1953 			 * disabled, use cond_resched_softirq() to preempt.
1954 			 * This is safe to do because we've taken the backlog
1955 			 * queue private:
1956 			 */
1957 			cond_resched_softirq();
1958 
1959 			skb = next;
1960 		} while (skb != NULL);
1961 
1962 		bh_lock_sock(sk);
1963 	} while ((skb = sk->sk_backlog.head) != NULL);
1964 
1965 	/*
1966 	 * Doing the zeroing here guarantees we cannot loop forever
1967 	 * while a wild producer attempts to flood us.
1968 	 */
1969 	sk->sk_backlog.len = 0;
1970 }
1971 
1972 /**
1973  * sk_wait_data - wait for data to arrive at sk_receive_queue
1974  * @sk:    sock to wait on
1975  * @timeo: pointer to the remaining timeout; updated as the wait consumes it
1976  *
1977  * Socket state, including sk->sk_err, is changed only under the socket
1978  * lock, hence we may omit checks after joining the wait queue. We check
1979  * the receive queue before schedule() only as an optimization; it is
1980  * very likely that release_sock() added new data.
1981  */
1982 int sk_wait_data(struct sock *sk, long *timeo)
1983 {
1984 	int rc;
1985 	DEFINE_WAIT(wait);
1986 
1987 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1988 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1989 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1990 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1991 	finish_wait(sk_sleep(sk), &wait);
1992 	return rc;
1993 }
1994 EXPORT_SYMBOL(sk_wait_data);
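/*
 * Illustrative sketch (not part of the original file): a blocking
 * receive path typically wraps sk_wait_data() with the usual timeout
 * and signal bookkeeping, roughly as below. The function name is
 * hypothetical; see tcp_recvmsg() for a real caller. The socket lock
 * must be held.
 */
static inline struct sk_buff *example_wait_for_skb(struct sock *sk,
						   int noblock, int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_receive_queue)) == NULL) {
		if (sk->sk_err) {
			*err = sock_error(sk);	/* negative errno */
			return NULL;
		}
		if (!timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		sk_wait_data(sk, &timeo);	/* sleeps; updates timeo */
	}
	return skb;
}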
1995 
1996 /**
1997  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1998  *	@sk: socket
1999  *	@size: memory size to allocate
2000  *	@kind: allocation type
2001  *
2002  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2003  *	rmem allocation. This function assumes that protocols which have
2004  *	memory_pressure use sk_wmem_queued as write buffer accounting.
2005  */
2006 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2007 {
2008 	struct proto *prot = sk->sk_prot;
2009 	int amt = sk_mem_pages(size);
2010 	long allocated;
2011 	int parent_status = UNDER_LIMIT;
2012 
2013 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2014 
2015 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2016 
2017 	/* Under limit. */
2018 	if (parent_status == UNDER_LIMIT &&
2019 			allocated <= sk_prot_mem_limits(sk, 0)) {
2020 		sk_leave_memory_pressure(sk);
2021 		return 1;
2022 	}
2023 
2024 	/* Under pressure. (we or our parents) */
2025 	if ((parent_status > SOFT_LIMIT) ||
2026 			allocated > sk_prot_mem_limits(sk, 1))
2027 		sk_enter_memory_pressure(sk);
2028 
2029 	/* Over hard limit (we or our parents) */
2030 	if ((parent_status == OVER_LIMIT) ||
2031 			(allocated > sk_prot_mem_limits(sk, 2)))
2032 		goto suppress_allocation;
2033 
2034 	/* guarantee minimum buffer size under pressure */
2035 	if (kind == SK_MEM_RECV) {
2036 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2037 			return 1;
2038 
2039 	} else { /* SK_MEM_SEND */
2040 		if (sk->sk_type == SOCK_STREAM) {
2041 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2042 				return 1;
2043 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2044 			   prot->sysctl_wmem[0])
2045 				return 1;
2046 	}
2047 
2048 	if (sk_has_memory_pressure(sk)) {
2049 		int alloc;
2050 
2051 		if (!sk_under_memory_pressure(sk))
2052 			return 1;
2053 		alloc = sk_sockets_allocated_read_positive(sk);
2054 		if (sk_prot_mem_limits(sk, 2) > alloc *
2055 		    sk_mem_pages(sk->sk_wmem_queued +
2056 				 atomic_read(&sk->sk_rmem_alloc) +
2057 				 sk->sk_forward_alloc))
2058 			return 1;
2059 	}
2060 
2061 suppress_allocation:
2062 
2063 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2064 		sk_stream_moderate_sndbuf(sk);
2065 
2066 		/* Fail only if socket is _under_ its sndbuf.
2067 		 * In this case we cannot block, so that we have to fail.
2068 		 */
2069 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2070 			return 1;
2071 	}
2072 
2073 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2074 
2075 	/* Alas. Undo changes. */
2076 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2077 
2078 	sk_memory_allocated_sub(sk, amt);
2079 
2080 	return 0;
2081 }
2082 EXPORT_SYMBOL(__sk_mem_schedule);
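/*
 * Illustrative sketch (not part of the original file): protocols
 * normally reach __sk_mem_schedule() through the sk_wmem_schedule()/
 * sk_rmem_schedule() inlines, which spend the per-socket
 * sk_forward_alloc quantum first and only fall back to the slow path
 * above when it runs out. Roughly (helper name hypothetical):
 */
static inline bool example_charge_wmem(struct sock *sk, int size)
{
	/* fast path: spend the already-scheduled forward_alloc quantum */
	if (sk->sk_forward_alloc >= size)
		return true;
	/* slow path: charge memory_allocated via __sk_mem_schedule() */
	return __sk_mem_schedule(sk, size, SK_MEM_SEND) == 1;
}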
2083 
2084 /**
2085  *	__sk_mem_reclaim - reclaim memory_allocated
2086  *	@sk: socket
2087  */
2088 void __sk_mem_reclaim(struct sock *sk)
2089 {
2090 	sk_memory_allocated_sub(sk,
2091 				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2092 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2093 
2094 	if (sk_under_memory_pressure(sk) &&
2095 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2096 		sk_leave_memory_pressure(sk);
2097 }
2098 EXPORT_SYMBOL(__sk_mem_reclaim);
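/*
 * Illustrative sketch (not part of the original file): the
 * sk_mem_reclaim() inline only bothers calling __sk_mem_reclaim()
 * once at least one full SK_MEM_QUANTUM can be returned, roughly
 * (helper name hypothetical):
 */
static inline void example_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);	/* returns whole quanta, keeps the rest */
}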
2099 
2100 
2101 /*
2102  * Set of default routines for initialising struct proto_ops when
2103  * the protocol does not support a particular function. In certain
2104  * cases where it makes no sense for a protocol to have a "do nothing"
2105  * function, some default processing is provided.
2106  */
2107 
2108 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2109 {
2110 	return -EOPNOTSUPP;
2111 }
2112 EXPORT_SYMBOL(sock_no_bind);
2113 
2114 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2115 		    int len, int flags)
2116 {
2117 	return -EOPNOTSUPP;
2118 }
2119 EXPORT_SYMBOL(sock_no_connect);
2120 
2121 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2122 {
2123 	return -EOPNOTSUPP;
2124 }
2125 EXPORT_SYMBOL(sock_no_socketpair);
2126 
2127 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2128 {
2129 	return -EOPNOTSUPP;
2130 }
2131 EXPORT_SYMBOL(sock_no_accept);
2132 
2133 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2134 		    int *len, int peer)
2135 {
2136 	return -EOPNOTSUPP;
2137 }
2138 EXPORT_SYMBOL(sock_no_getname);
2139 
2140 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2141 {
2142 	return 0;
2143 }
2144 EXPORT_SYMBOL(sock_no_poll);
2145 
2146 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2147 {
2148 	return -EOPNOTSUPP;
2149 }
2150 EXPORT_SYMBOL(sock_no_ioctl);
2151 
2152 int sock_no_listen(struct socket *sock, int backlog)
2153 {
2154 	return -EOPNOTSUPP;
2155 }
2156 EXPORT_SYMBOL(sock_no_listen);
2157 
2158 int sock_no_shutdown(struct socket *sock, int how)
2159 {
2160 	return -EOPNOTSUPP;
2161 }
2162 EXPORT_SYMBOL(sock_no_shutdown);
2163 
2164 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2165 		    char __user *optval, unsigned int optlen)
2166 {
2167 	return -EOPNOTSUPP;
2168 }
2169 EXPORT_SYMBOL(sock_no_setsockopt);
2170 
2171 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2172 		    char __user *optval, int __user *optlen)
2173 {
2174 	return -EOPNOTSUPP;
2175 }
2176 EXPORT_SYMBOL(sock_no_getsockopt);
2177 
2178 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2179 		    size_t len)
2180 {
2181 	return -EOPNOTSUPP;
2182 }
2183 EXPORT_SYMBOL(sock_no_sendmsg);
2184 
2185 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2186 		    size_t len, int flags)
2187 {
2188 	return -EOPNOTSUPP;
2189 }
2190 EXPORT_SYMBOL(sock_no_recvmsg);
2191 
2192 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2193 {
2194 	/* Mirror missing mmap method error code */
2195 	return -ENODEV;
2196 }
2197 EXPORT_SYMBOL(sock_no_mmap);
2198 
2199 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2200 {
2201 	ssize_t res;
2202 	struct msghdr msg = {.msg_flags = flags};
2203 	struct kvec iov;
2204 	char *kaddr = kmap(page);
2205 	iov.iov_base = kaddr + offset;
2206 	iov.iov_len = size;
2207 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2208 	kunmap(page);
2209 	return res;
2210 }
2211 EXPORT_SYMBOL(sock_no_sendpage);
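/*
 * Illustrative sketch (not part of the original file): a protocol that
 * implements only a subset of operations can wire the remaining
 * proto_ops slots to the sock_no_*() stubs above. The ops table below
 * is hypothetical; a real protocol must still provide .release and its
 * actual I/O methods.
 */
static const struct proto_ops example_minimal_ops = {
	.family		= PF_UNSPEC,
	.owner		= THIS_MODULE,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};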
2212 
2213 /*
2214  *	Default Socket Callbacks
2215  */
2216 
2217 static void sock_def_wakeup(struct sock *sk)
2218 {
2219 	struct socket_wq *wq;
2220 
2221 	rcu_read_lock();
2222 	wq = rcu_dereference(sk->sk_wq);
2223 	if (wq_has_sleeper(wq))
2224 		wake_up_interruptible_all(&wq->wait);
2225 	rcu_read_unlock();
2226 }
2227 
2228 static void sock_def_error_report(struct sock *sk)
2229 {
2230 	struct socket_wq *wq;
2231 
2232 	rcu_read_lock();
2233 	wq = rcu_dereference(sk->sk_wq);
2234 	if (wq_has_sleeper(wq))
2235 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2236 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2237 	rcu_read_unlock();
2238 }
2239 
2240 static void sock_def_readable(struct sock *sk)
2241 {
2242 	struct socket_wq *wq;
2243 
2244 	rcu_read_lock();
2245 	wq = rcu_dereference(sk->sk_wq);
2246 	if (wq_has_sleeper(wq))
2247 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2248 						POLLRDNORM | POLLRDBAND);
2249 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2250 	rcu_read_unlock();
2251 }
2252 
2253 static void sock_def_write_space(struct sock *sk)
2254 {
2255 	struct socket_wq *wq;
2256 
2257 	rcu_read_lock();
2258 
2259 	/* Do not wake up a writer until he can make "significant"
2260 	 * progress.  --DaveM
2261 	 */
2262 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2263 		wq = rcu_dereference(sk->sk_wq);
2264 		if (wq_has_sleeper(wq))
2265 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2266 						POLLWRNORM | POLLWRBAND);
2267 
2268 		/* Should agree with poll, otherwise some programs break */
2269 		if (sock_writeable(sk))
2270 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2271 	}
2272 
2273 	rcu_read_unlock();
2274 }
2275 
2276 static void sock_def_destruct(struct sock *sk)
2277 {
2278 	kfree(sk->sk_protinfo);
2279 }
2280 
2281 void sk_send_sigurg(struct sock *sk)
2282 {
2283 	if (sk->sk_socket && sk->sk_socket->file)
2284 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2285 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2286 }
2287 EXPORT_SYMBOL(sk_send_sigurg);
2288 
2289 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2290 		    unsigned long expires)
2291 {
2292 	if (!mod_timer(timer, expires))
2293 		sock_hold(sk);
2294 }
2295 EXPORT_SYMBOL(sk_reset_timer);
2296 
2297 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2298 {
2299 	if (del_timer(timer))
2300 		__sock_put(sk);
2301 }
2302 EXPORT_SYMBOL(sk_stop_timer);
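/*
 * Illustrative sketch (not part of the original file): the handler of
 * a timer armed with sk_reset_timer() owns the reference taken there
 * and must drop it when it does not rearm. The handler name is
 * hypothetical.
 */
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... protocol-specific timer work ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}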
2303 
2304 void sock_init_data(struct socket *sock, struct sock *sk)
2305 {
2306 	skb_queue_head_init(&sk->sk_receive_queue);
2307 	skb_queue_head_init(&sk->sk_write_queue);
2308 	skb_queue_head_init(&sk->sk_error_queue);
2309 #ifdef CONFIG_NET_DMA
2310 	skb_queue_head_init(&sk->sk_async_wait_queue);
2311 #endif
2312 
2313 	sk->sk_send_head	=	NULL;
2314 
2315 	init_timer(&sk->sk_timer);
2316 
2317 	sk->sk_allocation	=	GFP_KERNEL;
2318 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2319 	sk->sk_sndbuf		=	sysctl_wmem_default;
2320 	sk->sk_state		=	TCP_CLOSE;
2321 	sk_set_socket(sk, sock);
2322 
2323 	sock_set_flag(sk, SOCK_ZAPPED);
2324 
2325 	if (sock) {
2326 		sk->sk_type	=	sock->type;
2327 		sk->sk_wq	=	sock->wq;
2328 		sock->sk	=	sk;
2329 	} else
2330 		sk->sk_wq	=	NULL;
2331 
2332 	spin_lock_init(&sk->sk_dst_lock);
2333 	rwlock_init(&sk->sk_callback_lock);
2334 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2335 			af_callback_keys + sk->sk_family,
2336 			af_family_clock_key_strings[sk->sk_family]);
2337 
2338 	sk->sk_state_change	=	sock_def_wakeup;
2339 	sk->sk_data_ready	=	sock_def_readable;
2340 	sk->sk_write_space	=	sock_def_write_space;
2341 	sk->sk_error_report	=	sock_def_error_report;
2342 	sk->sk_destruct		=	sock_def_destruct;
2343 
2344 	sk->sk_frag.page	=	NULL;
2345 	sk->sk_frag.offset	=	0;
2346 	sk->sk_peek_off		=	-1;
2347 
2348 	sk->sk_peer_pid 	=	NULL;
2349 	sk->sk_peer_cred	=	NULL;
2350 	sk->sk_write_pending	=	0;
2351 	sk->sk_rcvlowat		=	1;
2352 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2353 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2354 
2355 	sk->sk_stamp = ktime_set(-1L, 0);
2356 
2357 #ifdef CONFIG_NET_RX_BUSY_POLL
2358 	sk->sk_napi_id		=	0;
2359 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2360 #endif
2361 
2362 	sk->sk_max_pacing_rate = ~0U;
2363 	sk->sk_pacing_rate = ~0U;
2364 	/*
2365 	 * Before updating sk_refcnt, we must commit prior changes to memory
2366 	 * (Documentation/RCU/rculist_nulls.txt for details)
2367 	 */
2368 	smp_wmb();
2369 	atomic_set(&sk->sk_refcnt, 1);
2370 	atomic_set(&sk->sk_drops, 0);
2371 }
2372 EXPORT_SYMBOL(sock_init_data);
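/*
 * Illustrative sketch (not part of the original file): an address
 * family's create() hook usually allocates the sock and then lets
 * sock_init_data() install the defaults before applying its own
 * overrides. Names below are hypothetical.
 */
static int example_create(struct net *net, struct socket *sock,
			  struct proto *prot)
{
	struct sock *sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, prot);

	if (!sk)
		return -ENOMEM;
	sock_init_data(sock, sk);	/* queues, timer, sock_def_* callbacks */
	/* protocol-specific overrides (callbacks, buffer sizes) go here */
	return 0;
}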
2373 
2374 void lock_sock_nested(struct sock *sk, int subclass)
2375 {
2376 	might_sleep();
2377 	spin_lock_bh(&sk->sk_lock.slock);
2378 	if (sk->sk_lock.owned)
2379 		__lock_sock(sk);
2380 	sk->sk_lock.owned = 1;
2381 	spin_unlock(&sk->sk_lock.slock);
2382 	/*
2383 	 * The sk_lock has mutex_lock() semantics here:
2384 	 */
2385 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2386 	local_bh_enable();
2387 }
2388 EXPORT_SYMBOL(lock_sock_nested);
2389 
2390 void release_sock(struct sock *sk)
2391 {
2392 	/*
2393 	 * The sk_lock has mutex_unlock() semantics:
2394 	 */
2395 	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2396 
2397 	spin_lock_bh(&sk->sk_lock.slock);
2398 	if (sk->sk_backlog.tail)
2399 		__release_sock(sk);
2400 
2401 	/* Warning: release_cb() might need to release sk ownership,
2402 	 * i.e., call sock_release_ownership(sk) before us.
2403 	 */
2404 	if (sk->sk_prot->release_cb)
2405 		sk->sk_prot->release_cb(sk);
2406 
2407 	sock_release_ownership(sk);
2408 	if (waitqueue_active(&sk->sk_lock.wq))
2409 		wake_up(&sk->sk_lock.wq);
2410 	spin_unlock_bh(&sk->sk_lock.slock);
2411 }
2412 EXPORT_SYMBOL(release_sock);
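/*
 * Illustrative sketch (not part of the original file): process context
 * brackets socket state changes with lock_sock()/release_sock(); any
 * packets backlogged by softirq context while the lock was owned are
 * replayed by __release_sock() above. The helper name is hypothetical.
 */
static inline void example_set_rcvlowat(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_rcvlowat = val ? : 1;	/* 0 means "at least one byte" */
	release_sock(sk);
}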
2413 
2414 /**
2415  * lock_sock_fast - fast version of lock_sock
2416  * @sk: socket
2417  *
2418  * This version should be used for very small sections, where the process
2419  * won't block. Returns false if the fast path is taken:
2420  *   sk_lock.slock locked, owned = 0, BH disabled
2421  * Returns true if the slow path is taken:
2422  *   sk_lock.slock unlocked, owned = 1, BH enabled
2423  */
2424 bool lock_sock_fast(struct sock *sk)
2425 {
2426 	might_sleep();
2427 	spin_lock_bh(&sk->sk_lock.slock);
2428 
2429 	if (!sk->sk_lock.owned)
2430 		/*
2431 		 * Note: the fast path returns with slock held and BH disabled
2432 		 */
2433 		return false;
2434 
2435 	__lock_sock(sk);
2436 	sk->sk_lock.owned = 1;
2437 	spin_unlock(&sk->sk_lock.slock);
2438 	/*
2439 	 * The sk_lock has mutex_lock() semantics here:
2440 	 */
2441 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2442 	local_bh_enable();
2443 	return true;
2444 }
2445 EXPORT_SYMBOL(lock_sock_fast);
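/*
 * Illustrative sketch (not part of the original file): callers must
 * feed the returned slow-path flag back into unlock_sock_fast(). The
 * helper name is hypothetical; short receive-queue peeks are typical
 * users.
 */
static inline int example_peek_len(struct sock *sk)
{
	struct sk_buff *skb;
	bool slow;
	int len;

	slow = lock_sock_fast(sk);
	skb = skb_peek(&sk->sk_receive_queue);
	len = skb ? skb->len : 0;
	unlock_sock_fast(sk, slow);
	return len;
}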
2446 
2447 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2448 {
2449 	struct timeval tv;
2450 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2451 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2452 	tv = ktime_to_timeval(sk->sk_stamp);
2453 	if (tv.tv_sec == -1)
2454 		return -ENOENT;
2455 	if (tv.tv_sec == 0) {
2456 		sk->sk_stamp = ktime_get_real();
2457 		tv = ktime_to_timeval(sk->sk_stamp);
2458 	}
2459 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2460 }
2461 EXPORT_SYMBOL(sock_get_timestamp);
2462 
2463 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2464 {
2465 	struct timespec ts;
2466 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2467 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2468 	ts = ktime_to_timespec(sk->sk_stamp);
2469 	if (ts.tv_sec == -1)
2470 		return -ENOENT;
2471 	if (ts.tv_sec == 0) {
2472 		sk->sk_stamp = ktime_get_real();
2473 		ts = ktime_to_timespec(sk->sk_stamp);
2474 	}
2475 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2476 }
2477 EXPORT_SYMBOL(sock_get_timestampns);
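/*
 * Illustrative sketch (not part of the original file): protocol ioctl
 * handlers commonly dispatch the stamp requests to the two getters
 * above. The function name is hypothetical.
 */
static int example_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}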
2478 
2479 void sock_enable_timestamp(struct sock *sk, int flag)
2480 {
2481 	if (!sock_flag(sk, flag)) {
2482 		unsigned long previous_flags = sk->sk_flags;
2483 
2484 		sock_set_flag(sk, flag);
2485 		/*
2486 		 * We just set one of the two flags which require net
2487 		 * time stamping, but time stamping might have been on
2488 		 * already because of the other one.
2489 		 */
2490 		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2491 			net_enable_timestamp();
2492 	}
2493 }
2494 
2495 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2496 		       int level, int type)
2497 {
2498 	struct sock_exterr_skb *serr;
2499 	struct sk_buff *skb, *skb2;
2500 	int copied, err;
2501 
2502 	err = -EAGAIN;
2503 	skb = skb_dequeue(&sk->sk_error_queue);
2504 	if (skb == NULL)
2505 		goto out;
2506 
2507 	copied = skb->len;
2508 	if (copied > len) {
2509 		msg->msg_flags |= MSG_TRUNC;
2510 		copied = len;
2511 	}
2512 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2513 	if (err)
2514 		goto out_free_skb;
2515 
2516 	sock_recv_timestamp(msg, sk, skb);
2517 
2518 	serr = SKB_EXT_ERR(skb);
2519 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2520 
2521 	msg->msg_flags |= MSG_ERRQUEUE;
2522 	err = copied;
2523 
2524 	/* Reset and regenerate socket error */
2525 	spin_lock_bh(&sk->sk_error_queue.lock);
2526 	sk->sk_err = 0;
2527 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2528 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2529 		spin_unlock_bh(&sk->sk_error_queue.lock);
2530 		sk->sk_error_report(sk);
2531 	} else
2532 		spin_unlock_bh(&sk->sk_error_queue.lock);
2533 
2534 out_free_skb:
2535 	kfree_skb(skb);
2536 out:
2537 	return err;
2538 }
2539 EXPORT_SYMBOL(sock_recv_errqueue);
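/*
 * Illustrative sketch (not part of the original file): a datagram
 * protocol can service MSG_ERRQUEUE receives by delegating to
 * sock_recv_errqueue() with its own cmsg level and type. The wrapper
 * name and the SOL_IP/IP_RECVERR pairing below are illustrative.
 */
static inline int example_recv_err(struct sock *sk, struct msghdr *msg,
				   int len)
{
	return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
}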
2540 
2541 /*
2542  *	Get a socket option on a socket.
2543  *
2544  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2545  *	asynchronous errors should be reported by getsockopt. We assume
2546  *	this means if you specify SO_ERROR (otherwise what's the point of it?).
2547  */
2548 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2549 			   char __user *optval, int __user *optlen)
2550 {
2551 	struct sock *sk = sock->sk;
2552 
2553 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2554 }
2555 EXPORT_SYMBOL(sock_common_getsockopt);
2556 
2557 #ifdef CONFIG_COMPAT
2558 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2559 				  char __user *optval, int __user *optlen)
2560 {
2561 	struct sock *sk = sock->sk;
2562 
2563 	if (sk->sk_prot->compat_getsockopt != NULL)
2564 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2565 						      optval, optlen);
2566 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2567 }
2568 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2569 #endif
2570 
2571 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2572 			struct msghdr *msg, size_t size, int flags)
2573 {
2574 	struct sock *sk = sock->sk;
2575 	int addr_len = 0;
2576 	int err;
2577 
2578 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2579 				   flags & ~MSG_DONTWAIT, &addr_len);
2580 	if (err >= 0)
2581 		msg->msg_namelen = addr_len;
2582 	return err;
2583 }
2584 EXPORT_SYMBOL(sock_common_recvmsg);
2585 
2586 /*
2587  *	Set socket options on an inet socket.
2588  */
2589 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2590 			   char __user *optval, unsigned int optlen)
2591 {
2592 	struct sock *sk = sock->sk;
2593 
2594 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2595 }
2596 EXPORT_SYMBOL(sock_common_setsockopt);
2597 
2598 #ifdef CONFIG_COMPAT
2599 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2600 				  char __user *optval, unsigned int optlen)
2601 {
2602 	struct sock *sk = sock->sk;
2603 
2604 	if (sk->sk_prot->compat_setsockopt != NULL)
2605 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2606 						      optval, optlen);
2607 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2608 }
2609 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2610 #endif
2611 
2612 void sk_common_release(struct sock *sk)
2613 {
2614 	if (sk->sk_prot->destroy)
2615 		sk->sk_prot->destroy(sk);
2616 
2617 	/*
2618 	 * Observation: when sk_common_release() is called, processes have
2619 	 * no access to the socket, but the network stack still does.
2620 	 * Step one, detach it from networking:
2621 	 *
2622 	 * A. Remove from hash tables.
2623 	 */
2624 
2625 	sk->sk_prot->unhash(sk);
2626 
2627 	/*
2628 	 * At this point the socket cannot receive new packets, but some
2629 	 * packets may still be in flight, because some CPU did the hash
2630 	 * table lookup before we unhashed the socket. They will reach the
2631 	 * receive queue and be purged by the socket destructor.
2632 	 *
2633 	 * Also we still have packets pending on the receive queue, and
2634 	 * probably our own packets waiting in device queues. sock_destroy
2635 	 * will drain the receive queue, but transmitted packets will delay
2636 	 * socket destruction until the last reference is released.
2637 	 */
2638 
2639 	sock_orphan(sk);
2640 
2641 	xfrm_sk_free_policy(sk);
2642 
2643 	sk_refcnt_debug_release(sk);
2644 
2645 	if (sk->sk_frag.page) {
2646 		put_page(sk->sk_frag.page);
2647 		sk->sk_frag.page = NULL;
2648 	}
2649 
2650 	sock_put(sk);
2651 }
2652 EXPORT_SYMBOL(sk_common_release);
2653 
2654 #ifdef CONFIG_PROC_FS
2655 #define PROTO_INUSE_NR	64	/* should be enough for the first time */
2656 struct prot_inuse {
2657 	int val[PROTO_INUSE_NR];
2658 };
2659 
2660 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2661 
2662 #ifdef CONFIG_NET_NS
2663 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2664 {
2665 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2666 }
2667 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2668 
2669 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2670 {
2671 	int cpu, idx = prot->inuse_idx;
2672 	int res = 0;
2673 
2674 	for_each_possible_cpu(cpu)
2675 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2676 
2677 	return res >= 0 ? res : 0;
2678 }
2679 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2680 
2681 static int __net_init sock_inuse_init_net(struct net *net)
2682 {
2683 	net->core.inuse = alloc_percpu(struct prot_inuse);
2684 	return net->core.inuse ? 0 : -ENOMEM;
2685 }
2686 
2687 static void __net_exit sock_inuse_exit_net(struct net *net)
2688 {
2689 	free_percpu(net->core.inuse);
2690 }
2691 
2692 static struct pernet_operations net_inuse_ops = {
2693 	.init = sock_inuse_init_net,
2694 	.exit = sock_inuse_exit_net,
2695 };
2696 
2697 static __init int net_inuse_init(void)
2698 {
2699 	if (register_pernet_subsys(&net_inuse_ops))
2700 		panic("Cannot initialize net inuse counters");
2701 
2702 	return 0;
2703 }
2704 
2705 core_initcall(net_inuse_init);
2706 #else
2707 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2708 
2709 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2710 {
2711 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2712 }
2713 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2714 
2715 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2716 {
2717 	int cpu, idx = prot->inuse_idx;
2718 	int res = 0;
2719 
2720 	for_each_possible_cpu(cpu)
2721 		res += per_cpu(prot_inuse, cpu).val[idx];
2722 
2723 	return res >= 0 ? res : 0;
2724 }
2725 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2726 #endif
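/*
 * Illustrative sketch (not part of the original file): protocols bump
 * these counters from their hash/unhash callbacks so /proc/net/protocols
 * can report per-netns socket counts. The hook name is hypothetical.
 */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}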
2727 
2728 static void assign_proto_idx(struct proto *prot)
2729 {
2730 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2731 
2732 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2733 		pr_err("PROTO_INUSE_NR exhausted\n");
2734 		return;
2735 	}
2736 
2737 	set_bit(prot->inuse_idx, proto_inuse_idx);
2738 }
2739 
2740 static void release_proto_idx(struct proto *prot)
2741 {
2742 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2743 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2744 }
2745 #else
2746 static inline void assign_proto_idx(struct proto *prot)
2747 {
2748 }
2749 
2750 static inline void release_proto_idx(struct proto *prot)
2751 {
2752 }
2753 #endif
2754 
2755 int proto_register(struct proto *prot, int alloc_slab)
2756 {
2757 	if (alloc_slab) {
2758 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2759 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2760 					NULL);
2761 
2762 		if (prot->slab == NULL) {
2763 			pr_crit("%s: Can't create sock SLAB cache!\n",
2764 				prot->name);
2765 			goto out;
2766 		}
2767 
2768 		if (prot->rsk_prot != NULL) {
2769 			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2770 			if (prot->rsk_prot->slab_name == NULL)
2771 				goto out_free_sock_slab;
2772 
2773 			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2774 								 prot->rsk_prot->obj_size, 0,
2775 								 SLAB_HWCACHE_ALIGN, NULL);
2776 
2777 			if (prot->rsk_prot->slab == NULL) {
2778 				pr_crit("%s: Can't create request sock SLAB cache!\n",
2779 					prot->name);
2780 				goto out_free_request_sock_slab_name;
2781 			}
2782 		}
2783 
2784 		if (prot->twsk_prot != NULL) {
2785 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2786 
2787 			if (prot->twsk_prot->twsk_slab_name == NULL)
2788 				goto out_free_request_sock_slab;
2789 
2790 			prot->twsk_prot->twsk_slab =
2791 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2792 						  prot->twsk_prot->twsk_obj_size,
2793 						  0,
2794 						  SLAB_HWCACHE_ALIGN |
2795 							prot->slab_flags,
2796 						  NULL);
2797 			if (prot->twsk_prot->twsk_slab == NULL)
2798 				goto out_free_timewait_sock_slab_name;
2799 		}
2800 	}
2801 
2802 	mutex_lock(&proto_list_mutex);
2803 	list_add(&prot->node, &proto_list);
2804 	assign_proto_idx(prot);
2805 	mutex_unlock(&proto_list_mutex);
2806 	return 0;
2807 
2808 out_free_timewait_sock_slab_name:
2809 	kfree(prot->twsk_prot->twsk_slab_name);
2810 out_free_request_sock_slab:
2811 	if (prot->rsk_prot && prot->rsk_prot->slab) {
2812 		kmem_cache_destroy(prot->rsk_prot->slab);
2813 		prot->rsk_prot->slab = NULL;
2814 	}
2815 out_free_request_sock_slab_name:
2816 	if (prot->rsk_prot)
2817 		kfree(prot->rsk_prot->slab_name);
2818 out_free_sock_slab:
2819 	kmem_cache_destroy(prot->slab);
2820 	prot->slab = NULL;
2821 out:
2822 	return -ENOBUFS;
2823 }
2824 EXPORT_SYMBOL(proto_register);
2825 
2826 void proto_unregister(struct proto *prot)
2827 {
2828 	mutex_lock(&proto_list_mutex);
2829 	release_proto_idx(prot);
2830 	list_del(&prot->node);
2831 	mutex_unlock(&proto_list_mutex);
2832 
2833 	if (prot->slab != NULL) {
2834 		kmem_cache_destroy(prot->slab);
2835 		prot->slab = NULL;
2836 	}
2837 
2838 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2839 		kmem_cache_destroy(prot->rsk_prot->slab);
2840 		kfree(prot->rsk_prot->slab_name);
2841 		prot->rsk_prot->slab = NULL;
2842 	}
2843 
2844 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2845 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2846 		kfree(prot->twsk_prot->twsk_slab_name);
2847 		prot->twsk_prot->twsk_slab = NULL;
2848 	}
2849 }
2850 EXPORT_SYMBOL(proto_unregister);
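/*
 * Illustrative sketch (not part of the original file): a protocol
 * module pairs proto_register() with proto_unregister() in its
 * init/exit paths, wired up via module_init()/module_exit(). The
 * proto instance below is hypothetical.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_proto, 1);	/* 1 => create a slab */
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}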
2851 
2852 #ifdef CONFIG_PROC_FS
2853 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2854 	__acquires(proto_list_mutex)
2855 {
2856 	mutex_lock(&proto_list_mutex);
2857 	return seq_list_start_head(&proto_list, *pos);
2858 }
2859 
2860 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2861 {
2862 	return seq_list_next(v, &proto_list, pos);
2863 }
2864 
2865 static void proto_seq_stop(struct seq_file *seq, void *v)
2866 	__releases(proto_list_mutex)
2867 {
2868 	mutex_unlock(&proto_list_mutex);
2869 }
2870 
2871 static char proto_method_implemented(const void *method)
2872 {
2873 	return method == NULL ? 'n' : 'y';
2874 }
2875 static long sock_prot_memory_allocated(struct proto *proto)
2876 {
2877 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2878 }
2879 
2880 static char *sock_prot_memory_pressure(struct proto *proto)
2881 {
2882 	return proto->memory_pressure != NULL ?
2883 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2884 }
2885 
2886 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2887 {
2888 
2889 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
2890 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2891 		   proto->name,
2892 		   proto->obj_size,
2893 		   sock_prot_inuse_get(seq_file_net(seq), proto),
2894 		   sock_prot_memory_allocated(proto),
2895 		   sock_prot_memory_pressure(proto),
2896 		   proto->max_header,
2897 		   proto->slab == NULL ? "no" : "yes",
2898 		   module_name(proto->owner),
2899 		   proto_method_implemented(proto->close),
2900 		   proto_method_implemented(proto->connect),
2901 		   proto_method_implemented(proto->disconnect),
2902 		   proto_method_implemented(proto->accept),
2903 		   proto_method_implemented(proto->ioctl),
2904 		   proto_method_implemented(proto->init),
2905 		   proto_method_implemented(proto->destroy),
2906 		   proto_method_implemented(proto->shutdown),
2907 		   proto_method_implemented(proto->setsockopt),
2908 		   proto_method_implemented(proto->getsockopt),
2909 		   proto_method_implemented(proto->sendmsg),
2910 		   proto_method_implemented(proto->recvmsg),
2911 		   proto_method_implemented(proto->sendpage),
2912 		   proto_method_implemented(proto->bind),
2913 		   proto_method_implemented(proto->backlog_rcv),
2914 		   proto_method_implemented(proto->hash),
2915 		   proto_method_implemented(proto->unhash),
2916 		   proto_method_implemented(proto->get_port),
2917 		   proto_method_implemented(proto->enter_memory_pressure));
2918 }
2919 
2920 static int proto_seq_show(struct seq_file *seq, void *v)
2921 {
2922 	if (v == &proto_list)
2923 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2924 			   "protocol",
2925 			   "size",
2926 			   "sockets",
2927 			   "memory",
2928 			   "press",
2929 			   "maxhdr",
2930 			   "slab",
2931 			   "module",
2932 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2933 	else
2934 		proto_seq_printf(seq, list_entry(v, struct proto, node));
2935 	return 0;
2936 }
2937 
2938 static const struct seq_operations proto_seq_ops = {
2939 	.start  = proto_seq_start,
2940 	.next   = proto_seq_next,
2941 	.stop   = proto_seq_stop,
2942 	.show   = proto_seq_show,
2943 };
2944 
2945 static int proto_seq_open(struct inode *inode, struct file *file)
2946 {
2947 	return seq_open_net(inode, file, &proto_seq_ops,
2948 			    sizeof(struct seq_net_private));
2949 }
2950 
2951 static const struct file_operations proto_seq_fops = {
2952 	.owner		= THIS_MODULE,
2953 	.open		= proto_seq_open,
2954 	.read		= seq_read,
2955 	.llseek		= seq_lseek,
2956 	.release	= seq_release_net,
2957 };
2958 
2959 static __net_init int proto_init_net(struct net *net)
2960 {
2961 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2962 		return -ENOMEM;
2963 
2964 	return 0;
2965 }
2966 
2967 static __net_exit void proto_exit_net(struct net *net)
2968 {
2969 	remove_proc_entry("protocols", net->proc_net);
2970 }
2971 
2972 
2973 static __net_initdata struct pernet_operations proto_net_ops = {
2974 	.init = proto_init_net,
2975 	.exit = proto_exit_net,
2976 };
2977 
2978 static int __init proto_init(void)
2979 {
2980 	return register_pernet_subsys(&proto_net_ops);
2981 }
2982 
2983 subsys_initcall(proto_init);
2984 
2985 #endif /* PROC_FS */
2986