xref: /openbmc/linux/net/ipv4/af_inet.c (revision e0f6d1a5)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		piggy,
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures
 *					when accept() ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Alan Cox,
 *		Tony Gale 	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					specifically requested by the application.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *	Willy Konynenberg	:	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>

#include <trace/events/sock.h>

/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
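/* Writers serialize on inetsw_lock; inet_create() walks the per-type
 * lists under RCU, which is why entries are added and removed with the
 * _rcu list primitives in inet_register_protosw()/inet_unregister_protosw().
 */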

/*
 *	Destructor for AF_INET sockets: purge the receive and error queues,
 *	reclaim socket memory and drop cached IP options and routes once
 *	the socket is dead.
 */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
	dst_release(sk->sk_rx_dst);
	sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);

/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly they punt to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;
	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
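		/* Passing port 0 asks the protocol's get_port() to pick a
		 * free ephemeral port.
		 */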
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}

/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err, tcp_fastopen;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also the fastopen backlog may already have been set via
		 * the option because the socket was in TCP_LISTEN state
		 * previously but was shutdown() rather than close().
		 */
		tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);

/*
 *	Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct inet_protosw *answer;
	struct inet_sock *inet;
	struct proto *answer_prot;
	unsigned char answer_flags;
	int try_loading_module = 0;
	int err;

	if (protocol < 0 || protocol >= IPPROTO_MAX)
		return -EINVAL;

	sock->state = SS_UNCONNECTED;

	/* Look for the requested type/protocol pair. */
lookup_protocol:
	err = -ESOCKTNOSUPPORT;
	rcu_read_lock();
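	/* IPPROTO_IP (0) acts as a wildcard below: as the requested
	 * protocol it matches the first entry for this socket type, and
	 * as a table entry it matches any requested protocol.
	 */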
	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {

		err = 0;
		/* Check the non-wild match. */
		if (protocol == answer->protocol) {
			if (protocol != IPPROTO_IP)
				break;
		} else {
			/* Check for the two wild cases. */
			if (IPPROTO_IP == protocol) {
				protocol = answer->protocol;
				break;
			}
			if (IPPROTO_IP == answer->protocol)
				break;
		}
		err = -EPROTONOSUPPORT;
	}

	if (unlikely(err)) {
		if (try_loading_module < 2) {
			rcu_read_unlock();
			/*
			 * Be more specific, e.g. net-pf-2-proto-132-type-1
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
			 */
			if (++try_loading_module == 1)
				request_module("net-pf-%d-proto-%d-type-%d",
					       PF_INET, protocol, sock->type);
			/*
			 * Fall back to generic, e.g. net-pf-2-proto-132
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
			 */
			else
				request_module("net-pf-%d-proto-%d",
					       PF_INET, protocol);
			goto lookup_protocol;
		} else
			goto out_rcu_unlock;
	}

	err = -EPERM;
	if (sock->type == SOCK_RAW && !kern &&
	    !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out_rcu_unlock;

	sock->ops = answer->ops;
	answer_prot = answer->prot;
	answer_flags = answer->flags;
	rcu_read_unlock();

	WARN_ON(!answer_prot->slab);

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
	if (!sk)
		goto out;

	err = 0;
	if (INET_PROTOSW_REUSE & answer_flags)
		sk->sk_reuse = SK_CAN_REUSE;

	inet = inet_sk(sk);
	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;

	inet->nodefrag = 0;

	if (SOCK_RAW == sock->type) {
		inet->inet_num = protocol;
		if (IPPROTO_RAW == protocol)
			inet->hdrincl = 1;
	}

	if (net->ipv4.sysctl_ip_no_pmtu_disc)
		inet->pmtudisc = IP_PMTUDISC_DONT;
	else
		inet->pmtudisc = IP_PMTUDISC_WANT;

	inet->inet_id = 0;

	sock_init_data(sock, sk);

	sk->sk_destruct	   = inet_sock_destruct;
	sk->sk_protocol	   = protocol;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;

	inet->uc_ttl	= -1;
	inet->mc_loop	= 1;
	inet->mc_ttl	= 1;
	inet->mc_all	= 1;
	inet->mc_index	= 0;
	inet->mc_list	= NULL;
	inet->rcv_tos	= 0;

	sk_refcnt_debug_inc(sk);

	if (inet->inet_num) {
		/* Any protocol that lets the user assign a port number at
		 * socket creation time is assumed to automatically share
		 * the port space.
		 */
		inet->inet_sport = htons(inet->inet_num);
		/* Add to protocol hash chains. */
		err = sk->sk_prot->hash(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (sk->sk_prot->init) {
		err = sk->sk_prot->init(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (!kern) {
		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}
out:
	return err;
out_rcu_unlock:
	rcu_read_unlock();
	goto out;
}


/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actual closing is done the same way in either case.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sock->sk = NULL;
		sk->sk_prot->close(sk, timeout);
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);

int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	int err;

	/* If the socket has its own bind function then use it. (RAW) */
	if (sk->sk_prot->bind) {
		return sk->sk_prot->bind(sk, uaddr, addr_len);
	}
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	/* BPF prog is run before any checks are done so that if the prog
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
	if (err)
		return err;

	return __inet_bind(sk, uaddr, addr_len, false, true);
}
EXPORT_SYMBOL(inet_bind);

int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
		bool force_bind_address_no_port, bool with_lock)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	unsigned short snum;
	int chk_addr_ret;
	u32 tb_id = RT_TABLE_LOCAL;
	int err;

	if (addr->sin_family != AF_INET) {
		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
		 * only if s_addr is INADDR_ANY.
		 */
		err = -EAFNOSUPPORT;
		if (addr->sin_family != AF_UNSPEC ||
		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
			goto out;
	}

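	/* If the socket is bound to an L3 master device (VRF), resolve the
	 * address against that device's FIB table instead of the default
	 * local table.
	 */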
	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

	/* Not specified by any standard per se, however it breaks too
	 * many applications when removed.  It is unfortunate since
	 * allowing applications to make a non-local bind solves
	 * several problems with systems using dynamic addressing.
	 * (ie. your servers still start up even if your ISDN link
	 *  is temporarily down)
	 */
	err = -EADDRNOTAVAIL;
	if (!net->ipv4.sysctl_ip_nonlocal_bind &&
	    !(inet->freebind || inet->transparent) &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
	    chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST &&
	    chk_addr_ret != RTN_BROADCAST)
		goto out;

	snum = ntohs(addr->sin_port);
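	/* Binding to a privileged port (below the per-netns ip_prot_sock
	 * threshold, PROT_SOCK = 1024 by default) requires
	 * CAP_NET_BIND_SERVICE in the netns' user namespace.
	 */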
	err = -EACCES;
	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		goto out;

	/*      We keep a pair of addresses. rcv_saddr is the one
	 *      used by hash lookups, and saddr is used for transmit.
	 *
	 *      In the BSD API these are the same except where it
	 *      would be illegal to use them (multicast/broadcast) in
	 *      which case the sending device address is used.
	 */
	if (with_lock)
		lock_sock(sk);

	/* Check these errors (active socket, double bind). */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
		goto out_release_sock;

	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Make sure we are allowed to bind here. */
	if (snum || !(inet->bind_address_no_port ||
		      force_bind_address_no_port)) {
		if (sk->sk_prot->get_port(sk, snum)) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			err = -EADDRINUSE;
			goto out_release_sock;
		}
		err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
		if (err) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			goto out_release_sock;
		}
	}

	if (inet->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	inet->inet_sport = htons(inet->inet_num);
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sk_dst_reset(sk);
	err = 0;
out_release_sock:
	if (with_lock)
		release_sock(sk);
out:
	return err;
}

int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	int err;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;
	if (uaddr->sa_family == AF_UNSPEC)
		return sk->sk_prot->disconnect(sk, flags);

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
		if (err)
			return err;
	}

	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
		return -EAGAIN;
	return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);

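/* Sleep until the socket leaves SYN_SENT/SYN_RECV, a signal arrives or the
 * timeout expires.  The writebias (nonzero when a TCP Fast Open connect
 * carries data, see the caller) is added to sk_write_pending, presumably so
 * that write-space wakeups also end the wait.
 */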
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
	 * connect() does not allow error notifications to be retrieved
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	return timeo;
}

/*
 *	Connect to a remote host. There is regrettably still a little
 *	TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len, int flags, int is_sendmsg)
{
	struct sock *sk = sock->sk;
	int err;
	long timeo;

	/*
	 * uaddr can be NULL and addr_len can be 0 if:
	 * sk is a TCP fastopen active socket and
	 * TCP_FASTOPEN_CONNECT sockopt is set and
	 * we already have a valid cookie for this socket.
	 * In this case, user can call write() after connect().
	 * write() will invoke tcp_sendmsg_fastopen() which calls
	 * __inet_stream_connect().
	 */
	if (uaddr) {
		if (addr_len < sizeof(uaddr->sa_family))
			return -EINVAL;

		if (uaddr->sa_family == AF_UNSPEC) {
			err = sk->sk_prot->disconnect(sk, flags);
			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
			goto out;
		}
	}

	switch (sock->state) {
	default:
		err = -EINVAL;
		goto out;
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (inet_sk(sk)->defer_connect)
			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
		else
			err = -EALREADY;
		/* Fall out of switch with err, set for this state */
		break;
	case SS_UNCONNECTED:
		err = -EISCONN;
		if (sk->sk_state != TCP_CLOSE)
			goto out;

		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
			if (err)
				goto out;
		}

		err = sk->sk_prot->connect(sk, uaddr, addr_len);
		if (err < 0)
			goto out;

		sock->state = SS_CONNECTING;

		if (!err && inet_sk(sk)->defer_connect)
			goto out;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		err = -EINPROGRESS;
		break;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
				tcp_sk(sk)->fastopen_req &&
				tcp_sk(sk)->fastopen_req->data ? 1 : 0;

		/* Error code is set above */
		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
			goto out;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
	}

	/* Connection was closed by RST, timeout, ICMP error
	 * or another process disconnected us.
	 */
	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	/* sk->sk_err may be nonzero now, if RECVERR was ordered by the user
	 * and an error was received after the socket entered the established
	 * state.  Hence, it is handled normally after connect() returns
	 * successfully.
	 */

	sock->state = SS_CONNECTED;
	err = 0;
out:
	return err;

sock_error:
	err = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	if (sk->sk_prot->disconnect(sk, flags))
		sock->state = SS_DISCONNECTING;
	goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);

int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);

/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	struct sock *sk1 = sock->sk;
	int err = -EINVAL;
	struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);

	if (!sk2)
		goto do_err;

	lock_sock(sk2);

	sock_rps_record_flow(sk2);
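	/* A freshly accepted socket should be in one of the states a
	 * completed (or already reset) connection can legitimately be in.
	 */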
	WARN_ON(!((1 << sk2->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		  TCPF_CLOSE_WAIT | TCPF_CLOSE)));

	sock_graft(sk2, newsock);

	newsock->state = SS_CONNECTED;
	err = 0;
	release_sock(sk2);
do_err:
	return err;
}
EXPORT_SYMBOL(inet_accept);


/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sock *sk		= sock->sk;
	struct inet_sock *inet	= inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

	sin->sin_family = AF_INET;
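	/* A peer name requires a destination; unless the (kernel-internal)
	 * caller passed peer == 2, the connection must also not be in
	 * CLOSE or SYN_SENT.
	 */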
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1))
			return -ENOTCONN;
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;
		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
	}
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	return sizeof(*sin);
}
EXPORT_SYMBOL(inet_getname);

int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
		      size_t size, int flags)
{
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	if (sk->sk_prot->sendpage)
		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);

int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

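	/* Error-queue reads carry no flow traffic, so there is presumably
	 * nothing useful to record in the RPS flow table for them.
	 */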
	if (likely(!(flags & MSG_ERRQUEUE)))
		sock_rps_record_flow(sk);

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(inet_recvmsg);

int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++; /* maps SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) to 1/2/3, so that
		* bit 1 means "shut down receives" and bit 2 means "shut
		* down sends"; ~0 wraps to 0 and is rejected below.
		*/
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
		/* fall through */
	default:
		sk->sk_shutdown |= how;
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are temporary solution for missing
	 * close() in multithreaded environment. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);

/*
 *	ioctl() calls you can issue on an INET socket. Most of these are
 *	device configuration and stuff and very rarely used. Some ioctls
 *	pass on to the socket itself.
 *
 *	NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
 *	loads the devconfigure module, does its configuring and unloads it.
 *	There's a good 20K of config code hanging around the kernel.
 */

int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = 0;
	struct net *net = sock_net(sk);
	void __user *p = (void __user *)arg;
	struct ifreq ifr;
	struct rtentry rt;

	switch (cmd) {
	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *)arg);
		break;
	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *)arg);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
			return -EFAULT;
		err = ip_rt_ioctl(net, cmd, &rt);
		break;
	case SIOCRTMSG:
		err = -EINVAL;
		break;
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
		err = arp_ioctl(net, cmd, (void __user *)arg);
		break;
	case SIOCGIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCGIFPFLAGS:
		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		if (!err && copy_to_user(p, &ifr, sizeof(struct ifreq)))
			err = -EFAULT;
		break;

	case SIOCSIFADDR:
	case SIOCSIFBRDADDR:
	case SIOCSIFNETMASK:
	case SIOCSIFDSTADDR:
	case SIOCSIFPFLAGS:
	case SIOCSIFFLAGS:
		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		break;
	default:
		if (sk->sk_prot->ioctl)
			err = sk->sk_prot->ioctl(sk, cmd, arg);
		else
			err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
EXPORT_SYMBOL(inet_ioctl);

#ifdef CONFIG_COMPAT
static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = -ENOIOCTLCMD;

	if (sk->sk_prot->compat_ioctl)
		err = sk->sk_prot->compat_ioctl(sk, cmd, arg);

	return err;
}
#endif

const struct proto_ops inet_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,
	.poll		   = tcp_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = inet_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.sendpage_locked   = tcp_sendpage_locked,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);

const struct proto_ops inet_dgram_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = udp_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
	.set_peek_off	   = sk_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);

/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};

static const struct net_proto_family inet_family_ops = {
	.family = PF_INET,
	.create = inet_create,
	.owner	= THIS_MODULE,
};

/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
	{
		.type =       SOCK_STREAM,
		.protocol =   IPPROTO_TCP,
		.prot =       &tcp_prot,
		.ops =        &inet_stream_ops,
		.flags =      INET_PROTOSW_PERMANENT |
			      INET_PROTOSW_ICSK,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_UDP,
		.prot =       &udp_prot,
		.ops =        &inet_dgram_ops,
		.flags =      INET_PROTOSW_PERMANENT,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_ICMP,
		.prot =       &ping_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	},

	{
		.type =       SOCK_RAW,
		.protocol =   IPPROTO_IP,	/* wild card */
		.prot =       &raw_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	}
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)

void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);

void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);

static int inet_sk_reselect_saddr(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	__be32 old_saddr = inet->inet_saddr;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 new_saddr;
	struct ip_options_rcu *inet_opt;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;

	/* Query new route. */
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
			      sk->sk_bound_dev_if, sk->sk_protocol,
			      inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	sk_setup_caps(sk, &rt->dst);

	new_saddr = fl4->saddr;

	if (new_saddr == old_saddr)
		return 0;

	if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) {
		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
			__func__, &old_saddr, &new_saddr);
	}

	inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;

	/*
	 * XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	return __sk_prot_rehash(sk);
}

int inet_sk_rebuild_header(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	__be32 daddr;
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	daddr = inet->inet_daddr;
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	rcu_read_unlock();
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
				   inet->inet_dport, inet->inet_sport,
				   sk->sk_protocol, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (!IS_ERR(rt)) {
		err = 0;
		sk_setup_caps(sk, &rt->dst);
	} else {
		err = PTR_ERR(rt);

		/* Routing failed... */
		sk->sk_route_caps = 0;
		/*
		 * Other protocols have to map their equivalent state to
		 * TCP_SYN_SENT.  DCCP maps its DCCP_REQUESTING state to
		 * TCP_SYN_SENT. -acme
		 */
		if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr ||
		    sk->sk_state != TCP_SYN_SENT ||
		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
		    (err = inet_sk_reselect_saddr(sk)) != 0)
			sk->sk_err_soft = -err;
	}

	return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);

void inet_sk_set_state(struct sock *sk, int state)
{
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);

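/* Like inet_sk_set_state(), but uses a release store so that lockless
 * readers pairing it with smp_load_acquire() observe a consistent value.
 */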
void inet_sk_state_store(struct sock *sk, int newstate)
{
	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
	smp_store_release(&sk->sk_state, newstate);
}

struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	bool udpfrag = false, fixedid = false, gso_partial, encap;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	unsigned int offset = 0;
	struct iphdr *iph;
	int proto, tot_len;
	int nhoff;
	int ihl;
	int id;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	id = ntohs(iph->id);
	proto = iph->protocol;

	/* Warning: after this point, iph might no longer be valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += ihl;

	skb_reset_transport_header(skb);

	segs = ERR_PTR(-EPROTONOSUPPORT);

	if (!skb->encapsulation || encap) {
		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);

		/* fixed ID is invalid if DF bit is not set */
		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
			goto out;
	}

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

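	/* Fix up each resulting segment's IP header: UDP fragments get a
	 * fragment offset and (except for the last) the MF flag; TCP
	 * segments get an incrementing IP ID unless a fixed ID was
	 * requested; total length and header checksum are recomputed.
	 */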
	skb = segs;
	do {
		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
		if (udpfrag) {
			iph->frag_off = htons(offset >> 3);
			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
			tot_len = skb->len - nhoff;
		} else if (skb_is_gso(skb)) {
			if (!fixedid) {
				iph->id = htons(id);
				id += skb_shinfo(skb)->gso_segs;
			}

			if (gso_partial)
				tot_len = skb_shinfo(skb)->gso_size +
					  SKB_GSO_CB(skb)->data_offset +
					  skb->head - (unsigned char *)iph;
			else
				tot_len = skb->len - nhoff;
		} else {
			if (!fixedid)
				iph->id = htons(id++);
			tot_len = skb->len - nhoff;
		}
		iph->tot_len = htons(tot_len);
		ip_send_check(iph);
		if (encap)
			skb_reset_inner_headers(skb);
		skb->network_header = (u8 *)iph - skb->head;
	} while ((skb = skb->next));

out:
	return segs;
}
EXPORT_SYMBOL(inet_gso_segment);

struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct iphdr *iph;
	unsigned int hlen;
	unsigned int off;
	unsigned int id;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	proto = iph->protocol;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

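	/* Only aggregate plain IPv4 headers: version 4, ihl 5 (no options),
	 * not a fragment, and with a valid header checksum.
	 */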
	if (*(u8 *)iph != 0x45)
		goto out_unlock;

	if (ip_is_fragment(iph))
		goto out_unlock;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out_unlock;

	id = ntohl(*(__be32 *)&iph->id);
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
	id >>= 16;

	for (p = *head; p; p = p->next) {
		struct iphdr *iph2;
		u16 flush_id;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (innermost) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* All fields must match except length and checksum. */
		NAPI_GRO_CB(p)->flush |=
			(iph->ttl ^ iph2->ttl) |
			(iph->tos ^ iph2->tos) |
			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));

		NAPI_GRO_CB(p)->flush |= flush;

		/* We need to store the IP ID check to be included later
		 * when we can verify that this packet does in fact belong
		 * to a given flow.
		 */
		flush_id = (u16)(id - ntohs(iph2->id));

		/* This bit of code makes it much easier for us to identify
		 * the cases where we are doing atomic vs non-atomic IP ID
		 * checks.  Specifically an atomic check can return IP ID
		 * values 0 - 0xFFFF, while a non-atomic check can only
		 * return 0 or 0xFFFF.
		 */
		if (!NAPI_GRO_CB(p)->is_atomic ||
		    !(iph->frag_off & htons(IP_DF))) {
			flush_id ^= NAPI_GRO_CB(p)->count;
			flush_id = flush_id ? 0xFFFF : 0;
		}

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = flush_id;
		else
			NAPI_GRO_CB(p)->flush_id |= flush_id;
	}

	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_set_network_header(skb, off);
	/* The above will be needed by the transport layer if there is one
	 * immediately following this IP hdr.
	 */

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
EXPORT_SYMBOL(inet_gro_receive);

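/* GRO entry point for IP-in-IP tunnels: refuse to aggregate once an outer
 * encapsulation has already been seen, so only one level of nesting is
 * coalesced.
 */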
static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);

int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	if (sk->sk_family == AF_INET)
		return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
	return -EINVAL;
}

int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	int proto = iph->protocol;
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
		skb_set_inner_network_header(skb, nhoff);
	}
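	/* Patch in the coalesced total length and incrementally update the
	 * header checksum to match.
	 */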
	csum_replace2(&iph->check, iph->tot_len, newlen);
	iph->tot_len = newlen;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	/* Only need to add sizeof(*iph) to get to the next hdr below
	 * because any hdr with option will have been flushed in
	 * inet_gro_receive().
	 */
	err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));

out_unlock:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(inet_gro_complete);

static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return inet_gro_complete(skb, nhoff);
}

int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net)
{
	struct socket *sock;
	int rc = sock_create_kern(net, family, type, protocol, &sock);

	if (rc == 0) {
		*sk = sock->sk;
		(*sk)->sk_allocation = GFP_ATOMIC;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sk)->sk_prot->unhash(*sk);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field);

unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);

#if BITS_PER_LONG==32

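/* On 32-bit hosts a 64-bit MIB counter cannot be read atomically, so each
 * per-cpu value is sampled under its u64_stats sequence counter and re-read
 * if a writer raced with us.
 */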
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset)
{
	void *bhptr;
	struct u64_stats_sync *syncp;
	u64 v;
	unsigned int start;

	bhptr = per_cpu_ptr(mib, cpu);
	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
	do {
		start = u64_stats_fetch_begin_irq(syncp);
		v = *(((u64 *)bhptr) + offt);
	} while (u64_stats_fetch_retry_irq(syncp, start));

	return v;
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);

u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	u64 res = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
	}
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif

#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
	.handler =	igmp_rcv,
	.netns_ok =	1,
};
#endif

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct net_protocol tcp_protocol = {
	.early_demux	=	tcp_v4_early_demux,
	.early_demux_handler =  tcp_v4_early_demux,
	.handler	=	tcp_v4_rcv,
	.err_handler	=	tcp_v4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
	.icmp_strict_tag_validation = 1,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct net_protocol udp_protocol = {
	.early_demux =	udp_v4_early_demux,
	.early_demux_handler =	udp_v4_early_demux,
	.handler =	udp_rcv,
	.err_handler =	udp_err,
	.no_policy =	1,
	.netns_ok =	1,
};

static const struct net_protocol icmp_protocol = {
	.handler =	icmp_rcv,
	.err_handler =	icmp_err,
	.no_policy =	1,
	.netns_ok =	1,
};

static __net_init int ipv4_mib_init_net(struct net *net)
{
	int i;

	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
	if (!net->mib.tcp_statistics)
		goto err_tcp_mib;
	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
	if (!net->mib.ip_statistics)
		goto err_ip_mib;

	for_each_possible_cpu(i) {
		struct ipstats_mib *af_inet_stats;
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
		u64_stats_init(&af_inet_stats->syncp);
	}

	net->mib.net_statistics = alloc_percpu(struct linux_mib);
	if (!net->mib.net_statistics)
		goto err_net_mib;
	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udp_statistics)
		goto err_udp_mib;
	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udplite_statistics)
		goto err_udplite_mib;
	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
	if (!net->mib.icmp_statistics)
		goto err_icmp_mib;
	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
					      GFP_KERNEL);
	if (!net->mib.icmpmsg_statistics)
		goto err_icmpmsg_mib;

	tcp_mib_init(net);
	return 0;

err_icmpmsg_mib:
	free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
	free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
	free_percpu(net->mib.udp_statistics);
err_udp_mib:
	free_percpu(net->mib.net_statistics);
err_net_mib:
	free_percpu(net->mib.ip_statistics);
err_ip_mib:
	free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
	return -ENOMEM;
}

static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
	.init = ipv4_mib_init_net,
	.exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
	return register_pernet_subsys(&ipv4_mib_ops);
}

static __net_init int inet_init_net(struct net *net)
{
	/*
	 * Set defaults for local port range
	 */
	seqlock_init(&net->ipv4.ip_local_ports.lock);
	net->ipv4.ip_local_ports.range[0] =  32768;
	net->ipv4.ip_local_ports.range[1] =  60999;

	seqlock_init(&net->ipv4.ping_group_range.lock);
	/*
	 * Sane defaults - nobody may create ping sockets.
	 * Boot scripts should set this to a distro-specific group.
	 */
	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
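	/* [1, 0] is deliberately an empty range, so ping sockets stay
	 * disabled until the sysctl is set to a real group range.
	 */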

	/* Default values for sysctl-controlled parameters.
	 * We set them here, in case sysctl is not compiled.
	 */
	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
	net->ipv4.sysctl_ip_dynaddr = 0;
	net->ipv4.sysctl_ip_early_demux = 1;
	net->ipv4.sysctl_udp_early_demux = 1;
	net->ipv4.sysctl_tcp_early_demux = 1;
#ifdef CONFIG_SYSCTL
	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif

	/* Some igmp sysctls, whose values are always used */
	net->ipv4.sysctl_igmp_max_memberships = 20;
	net->ipv4.sysctl_igmp_max_msf = 10;
	/* IGMP reports for link-local multicast groups are enabled by default */
	net->ipv4.sysctl_igmp_llm_reports = 1;
	net->ipv4.sysctl_igmp_qrv = 2;

	return 0;
}

static __net_exit void inet_exit_net(struct net *net)
{
}

static __net_initdata struct pernet_operations af_inet_ops = {
	.init = inet_init_net,
	.exit = inet_exit_net,
};

static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *	IP protocol layer initialiser
 */

static struct packet_offload ip_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment = inet_gso_segment,
		.gro_receive = inet_gro_receive,
		.gro_complete = inet_gro_complete,
	},
};

static const struct net_offload ipip_offload = {
	.callbacks = {
		.gso_segment	= inet_gso_segment,
		.gro_receive	= ipip_gro_receive,
		.gro_complete	= ipip_gro_complete,
	},
};

static int __init ipip_offload_init(void)
{
	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}

static int __init ipv4_offload_init(void)
{
	/*
	 * Add offloads
	 */
	if (udpv4_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (tcpv4_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipip_offload_init() < 0)
		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);

	dev_add_offload(&ip_packet_offload);
	return 0;
}

fs_initcall(ipv4_offload_init);

static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
};

static int __init inet_init(void)
{
	struct inet_protosw *q;
	struct list_head *r;
	int rc = -EINVAL;

	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));

	rc = proto_register(&tcp_prot, 1);
	if (rc)
		goto out;

	rc = proto_register(&udp_prot, 1);
	if (rc)
		goto out_unregister_tcp_proto;

	rc = proto_register(&raw_prot, 1);
	if (rc)
		goto out_unregister_udp_proto;

	rc = proto_register(&ping_prot, 1);
	if (rc)
		goto out_unregister_raw_proto;

	/*
	 *	Tell SOCKET that we are alive...
	 */

	(void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
	ip_static_sysctl_init();
#endif

	/*
	 *	Add all the base protocols.
	 */

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
	if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);
	if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

	/* Register the socket-side information for inet_create. */
	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
		INIT_LIST_HEAD(r);

	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
		inet_register_protosw(q);

	/*
	 *	Set the ARP module up
	 */

	arp_init();

	/*
	 *	Set the IP module up
	 */

	ip_init();

	/* Setup TCP slab cache for open requests. */
	tcp_init();

	/* Setup UDP memory threshold */
	udp_init();

	/* Add UDP-Lite (RFC 3828) */
	udplite4_register();

	ping_init();

	/*
	 *	Set the ICMP layer up
	 */

	if (icmp_init() < 0)
		panic("Failed to create the ICMP control socket.\n");

	/*
	 *	Initialise the multicast router
	 */
#if defined(CONFIG_IP_MROUTE)
	if (ip_mr_init())
		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

	if (init_inet_pernet_ops())
		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
	/*
	 *	Initialise per-cpu ipv4 mibs
	 */

	if (init_ipv4_mibs())
		pr_crit("%s: Cannot init ipv4 mibs\n", __func__);

	ipv4_proc_init();

	ipfrag_init();

	dev_add_pack(&ip_packet_type);

	ip_tunnel_core_init();

	rc = 0;
out:
	return rc;
out_unregister_raw_proto:
	proto_unregister(&raw_prot);
out_unregister_udp_proto:
	proto_unregister(&udp_prot);
out_unregister_tcp_proto:
	proto_unregister(&tcp_prot);
	goto out;
}

fs_initcall(inet_init);

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
	int rc = 0;

	if (raw_proc_init())
		goto out_raw;
	if (tcp4_proc_init())
		goto out_tcp;
	if (udp4_proc_init())
		goto out_udp;
	if (ping_proc_init())
		goto out_ping;
	if (ip_misc_proc_init())
		goto out_misc;
out:
	return rc;
out_misc:
	ping_proc_exit();
out_ping:
	udp4_proc_exit();
out_udp:
	tcp4_proc_exit();
out_tcp:
	raw_proc_exit();
out_raw:
	rc = -ENOMEM;
	goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
2047