// SPDX-License-Identifier: GPL-2.0-only
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		fallthrough;
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

void dccp_destruct_common(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
}
EXPORT_SYMBOL_GPL(dccp_destruct_common);

static void dccp_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet_sock_destruct(sk);
}

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by the poll logic, and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
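
/*
 * Example (illustrative userspace sketch, not part of the kernel build):
 * how the mask computed by dccp_poll() above is typically consumed via
 * poll(2). EPOLLIN/EPOLLOUT share their values with POLLIN/POLLOUT, so the
 * mask translates directly; the fd is assumed to be a connected DCCP socket.
 */
#if 0
#include <poll.h>
#include <stdio.h>

static void wait_for_dccp_io(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	/* Block until the socket is readable, writable, or in error. */
	if (poll(&pfd, 1, -1) < 0) {
		perror("poll");
		return;
	}
	if (pfd.revents & POLLERR)
		fprintf(stderr, "socket error pending\n");
	if (pfd.revents & POLLIN)
		printf("data (or a passive close) to read\n");
	if (pfd.revents & POLLOUT)
		printf("TX queue has room for another packet\n");
}
#endif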

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);
		/* Using sk_wmem_alloc here because sk_wmem_queued is not used
		 * by DCCP and is always 0, as is the case for UDP.
		 */

		rc = put_user(amount, (int __user *)arg);
	}
		break;
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);
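
/*
 * Example (illustrative userspace sketch): querying the queue sizes that
 * dccp_ioctl() reports. SIOCINQ returns the length of the packet at the
 * head of the receive queue (DCCP preserves datagram boundaries), while
 * SIOCOUTQ reports the bytes currently allocated to queued/in-flight skbs.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void show_dccp_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)
		printf("next packet to read: %d bytes\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("unsent TX data: %d bytes\n", outq);
}
#endif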

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_sockptr_offset(sl->dccpsl_list, optval,
				sizeof(service), optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}
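
/*
 * Example (illustrative userspace sketch): setting the DCCP service code
 * before connect()/listen(), which is what dccp_setsockopt_service() above
 * implements. A single __be32 sets one code; passing an array (as a server
 * might) installs the first entry as dccps_service and the remainder as the
 * additional service list. Values are in network byte order; the codes used
 * here are made-up example values.
 */
#if 0
#include <arpa/inet.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/dccp.h>

static int set_service_codes(int fd)
{
	/* Hypothetical service codes; 42 and 4242 are just examples. */
	uint32_t codes[2] = { htonl(42), htonl(4242) };

	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
			  codes, sizeof(codes));
}
#endif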

static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	/* Keep cscov intact: it is stored below as the socket's coverage. */
	for (i = 0; i < len; i++)
		list[i] = cscov + i;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}
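
/*
 * Example (illustrative userspace sketch): requesting partial checksum
 * coverage, handled by dccp_setsockopt_cscov() above. A sender value of 1
 * checksums only the DCCP header; a receiver value of 1 advertises that
 * such packets are acceptable (partial checksums, RFC 4340, sec. 9.2).
 */
#if 0
#include <sys/socket.h>
#include <linux/dccp.h>

static int allow_partial_checksums(int fd)
{
	int cscov = 1;

	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;
	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
			  &cscov, sizeof(cscov));
}
#endif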

static int dccp_setsockopt_ccid(struct sock *sk, int type,
				sockptr_t optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_sockptr(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}
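
/*
 * Example (illustrative userspace sketch): expressing a CCID preference
 * list, as consumed by dccp_setsockopt_ccid() above. The value is an array
 * of CCID numbers in order of preference; DCCP_SOCKOPT_CCID registers the
 * list for both the TX and RX half-connections at once.
 */
#if 0
#include <stdint.h>
#include <sys/socket.h>
#include <linux/dccp.h>

static int prefer_ccid3(int fd)
{
	/* Prefer CCID-3 (TFRC), fall back to CCID-2 (TCP-like). */
	uint8_t ccids[2] = { 3, 2 };

	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID,
			  ccids, sizeof(ccids));
}
#endif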

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		    unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
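
/*
 * Example (illustrative userspace sketch): attaching a qpolicy priority to
 * an outgoing packet via the DCCP_SCM_PRIORITY cmsg parsed by
 * dccp_msghdr_parse() above. The socket's queueing policy must first be
 * switched to DCCPQ_POLICY_PRIO via DCCP_SOCKOPT_QPOLICY_ID, otherwise
 * dccp_qpolicy_param_ok() rejects the cmsg and sendmsg() fails with EINVAL.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/dccp.h>

static ssize_t send_with_priority(int fd, const void *buf, size_t len,
				  uint32_t prio)
{
	char cbuf[CMSG_SPACE(sizeof(prio))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_DCCP;
	cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(prio));
	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

	return sendmsg(fd, &msg, 0);
}
#endif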

int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_discard;
	}

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		 int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			fallthrough;
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);
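
/*
 * Example (illustrative userspace sketch): a receive loop matching the
 * semantics of dccp_recvmsg() above. Each call returns at most one
 * datagram; a return of 0 signals end of connection (Close, CloseReq or
 * Reset seen), not an empty packet.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void drain_dccp(int fd)
{
	char buf[1500];
	ssize_t n;

	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
		printf("got %zd-byte datagram\n", n);
	if (n == 0)
		printf("connection closed by peer\n");
	else
		perror("recv");
}
#endif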

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		struct dccp_sock *dp = dccp_sk(sk);

		dp->dccps_role = DCCP_ROLE_LISTEN;

		/* do not start to listen if feature negotiation setup fails */
		if (dccp_feat_finalise_settings(dp)) {
			err = -EPROTO;
			goto out;
		}

		err = inet_csk_listen_start(sk);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);
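
/*
 * Example (illustrative userspace sketch): the passive-open sequence that
 * ends in inet_dccp_listen() above. The service code should be set before
 * listen(), since incoming Requests are only accepted when they carry a
 * matching service code.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/dccp.h>

static int dccp_server_socket(uint16_t port, uint32_t service)
{
	struct sockaddr_in addr;
	uint32_t code = htonl(service);
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &code, sizeof(code)) < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;	/* sin_addr stays 0 = INADDR_ANY */
	addr.sin_port	= htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 8) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}
#endif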

static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		fallthrough;
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		fallthrough;
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time);
	 * - normal termination but queue could not be flushed within time limit.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	this_cpu_inc(dccp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_hashinfo2;
	dccp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("dccp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!dccp_hashinfo.bind2_bucket_cachep)
		goto out_free_bind_bucket_cachep;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);
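	/*
	 * Worked example (assuming 4 KiB pages, PAGE_SHIFT == 12): with
	 * 4 GiB of RAM, nr_pages == 1048576 >= 128 * 1024, so
	 * goal = 1048576 >> (21 - 12) = 2048 pages == 8 MiB for the ehash
	 * table, i.e. roughly 1/512th of RAM; smaller machines take the
	 * >> (23 - PAGE_SHIFT) path and use about 1/2048th instead.
	 */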

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind2_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	dccp_hashinfo.bhash2 = (struct inet_bind_hashbucket *)
		__get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order);

	if (!dccp_hashinfo.bhash2) {
		DCCP_CRIT("Failed to allocate DCCP bind2 hash table");
		goto out_free_dccp_bhash;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
		spin_lock_init(&dccp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain);
	}

	dccp_hashinfo.pernet = false;

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash2;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash2:
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind2_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
	inet_hashinfo2_free_mod(&dccp_hashinfo);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.bhash2 = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	dccp_hashinfo.bind2_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	int bhash_order = get_order(dccp_hashinfo.bhash_size *
				    sizeof(struct inet_bind_hashbucket));

	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	/* Also destroy the bind2 cache created in dccp_init(). */
	kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	inet_hashinfo2_free_mod(&dccp_hashinfo);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");