/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _DCCP_H
#define _DCCP_H
/*
 *  net/dccp/dccp.h
 *
 *  An implementation of the DCCP protocol
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 */

#include <linux/dccp.h>
#include <linux/ktime.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include "ackvec.h"

/*
 * DCCP - specific warning and debugging macros.
 */
#define DCCP_WARN(fmt, ...)                                             \
        net_warn_ratelimited("%s: " fmt, __func__, ##__VA_ARGS__)
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
                                    __FILE__, __LINE__, __func__)
#define DCCP_BUG(a...)       do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
#define DCCP_BUG_ON(cond)    do { if (unlikely((cond) != 0))              \
                                     DCCP_BUG("\"%s\" holds (exception!)", \
                                              __stringify(cond));         \
                             } while (0)

#define DCCP_PRINTK(enable, fmt, args...) do { if (enable)            \
                                                  printk(fmt, ##args); \
                                          } while(0)
#define DCCP_PR_DEBUG(enable, fmt, a...)  DCCP_PRINTK(enable, KERN_DEBUG \
                                                  "%s: " fmt, __func__, ##a)

#ifdef CONFIG_IP_DCCP_DEBUG
extern bool dccp_debug;
#define dccp_pr_debug(format, a...)       DCCP_PR_DEBUG(dccp_debug, format, ##a)
#define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...)             dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
#define dccp_pr_debug(format, a...)       do {} while (0)
#define dccp_pr_debug_cat(format, a...)   do {} while (0)
#define dccp_debug(format, a...)          do {} while (0)
#endif

extern struct inet_hashinfo dccp_hashinfo;

DECLARE_PER_CPU(unsigned int, dccp_orphan_count);

void dccp_time_wait(struct sock *sk, int state, int timeo);

/*
 * Set safe upper bounds for header and option length. Since Data Offset is 8
 * bits (RFC 4340, sec. 5.1), the total header length can never be more than
 * 4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1):
 *    - DCCP-Response with ACK Subheader and 4 bytes of Service code      OR
 *    - DCCP-Reset    with ACK Subheader and 4 bytes of Reset Code fields
 * Hence a safe upper bound for the maximum option length is 1020-28 = 992
 */
#define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(uint32_t))
#define DCCP_MAX_PACKET_HDR      28
#define DCCP_MAX_OPT_LEN         (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR)
#define MAX_DCCP_HEADER          (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER)

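/*
 * For illustration only, the 28-byte figure above breaks down as follows
 * (field sizes per RFC 4340, sec. 5.1-5.6):
 *
 *      generic header with X=1 (48-bit seqnos)   16 bytes
 *      Acknowledgement Number subheader           8 bytes
 *      Service Code (Response) or
 *      Reset Code + Data1..3 (Reset)              4 bytes
 *                                                --------
 *      DCCP_MAX_PACKET_HDR                       28 bytes
 *
 * Data Offset counts 32-bit words, so the header can span at most
 * 255 * 4 = 1020 bytes, leaving 1020 - 28 = 992 bytes for options
 * (DCCP_MAX_OPT_LEN).
 */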

/* Upper bound for initial feature-negotiation overhead (padded to 32 bits) */
#define DCCP_FEATNEG_OVERHEAD   (32 * sizeof(uint32_t))

#define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
                                     * state, about 60 seconds */

/* RFC 1122, 4.2.3.1 initial RTO value */
#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))

/*
 * The maximum back-off value for retransmissions. This is needed for
 *  - retransmitting client-Requests (sec. 8.1.1),
 *  - retransmitting Close/CloseReq when closing (sec. 8.3),
 *  - feature-negotiation retransmission (sec. 6.6.3),
 *  - Acks in client-PARTOPEN state (sec. 8.1.5).
 */
#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))

/*
 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
 */
#define DCCP_SANE_RTT_MIN       100
#define DCCP_FALLBACK_RTT       (USEC_PER_SEC / 5)
#define DCCP_SANE_RTT_MAX       (3 * USEC_PER_SEC)

/* sysctl variables for DCCP */
extern int  sysctl_dccp_request_retries;
extern int  sysctl_dccp_retries1;
extern int  sysctl_dccp_retries2;
extern int  sysctl_dccp_tx_qlen;
extern int  sysctl_dccp_sync_ratelimit;

/*
 * 48-bit sequence number arithmetic (signed and unsigned)
 */
#define INT48_MIN         0x800000000000LL              /* 2^47     */
#define UINT48_MAX        0xFFFFFFFFFFFFLL              /* 2^48 - 1 */
#define COMPLEMENT48(x)   (0x1000000000000LL - (x))     /* 2^48 - x */
#define TO_SIGNED48(x)    (((x) < INT48_MIN) ? (x) : -COMPLEMENT48((x)))
#define TO_UNSIGNED48(x)  (((x) >= 0) ? (x) : COMPLEMENT48(-(x)))
#define ADD48(a, b)       (((a) + (b)) & UINT48_MAX)
#define SUB48(a, b)       ADD48((a), COMPLEMENT48(b))

static inline void dccp_inc_seqno(u64 *seqno)
{
        *seqno = ADD48(*seqno, 1);
}

/* signed mod-2^48 distance: pos. if seqno1 < seqno2, neg. if seqno1 > seqno2 */
static inline s64 dccp_delta_seqno(const u64 seqno1, const u64 seqno2)
{
        u64 delta = SUB48(seqno2, seqno1);

        return TO_SIGNED48(delta);
}

/* is seq1 < seq2 ? */
static inline int before48(const u64 seq1, const u64 seq2)
{
        return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
}

/* is seq1 > seq2 ? */
#define after48(seq1, seq2)     before48(seq2, seq1)

/* is seq2 <= seq1 <= seq3 ? */
static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
{
        return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}

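/*
 * Illustrative values for the mod-2^48 helpers above (chosen purely for
 * demonstration; they follow directly from the macro definitions):
 *
 *      ADD48(UINT48_MAX, 1)            == 0    (wraps around)
 *      dccp_delta_seqno(UINT48_MAX, 0) == +1   (0 directly follows UINT48_MAX)
 *      before48(UINT48_MAX, 0)         != 0    (UINT48_MAX precedes 0 in
 *                                               circular sequence order)
 */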

/**
 * dccp_loss_count - Approximate the number of lost data packets in a burst loss
 * @s1:  last known sequence number before the loss ('hole')
 * @s2:  first sequence number seen after the 'hole'
 * @ndp: NDP count on packet with sequence number @s2
 */
static inline u64 dccp_loss_count(const u64 s1, const u64 s2, const u64 ndp)
{
        s64 delta = dccp_delta_seqno(s1, s2);

        WARN_ON(delta < 0);
        delta -= ndp + 1;

        return delta > 0 ? delta : 0;
}

/**
 * dccp_loss_free - Evaluate condition for data loss from RFC 4340, 7.7.1
 * @s1:  last known sequence number before the 'hole'
 * @s2:  first sequence number seen after the 'hole'
 * @ndp: NDP count on packet with sequence number @s2
 *
 * Returns true if the range @s1...@s2 shows no sign of data loss.
 */
static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
{
        return dccp_loss_count(s1, s2, ndp) == 0;
}

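/*
 * Worked example (numbers purely illustrative): if packet 10 was the last
 * one received before a hole and packet 14 arrives next carrying NDP = 1,
 * then delta = 4 and 4 - (1 + 1) = 2 packets are presumed lost. If instead
 * packet 13 arrives with NDP = 2, the gap is fully accounted for by
 * non-data packets: 3 - (2 + 1) = 0, so dccp_loss_free() returns true.
 */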

enum {
        DCCP_MIB_NUM = 0,
        DCCP_MIB_ACTIVEOPENS,                   /* ActiveOpens */
        DCCP_MIB_ESTABRESETS,                   /* EstabResets */
        DCCP_MIB_CURRESTAB,                     /* CurrEstab */
        DCCP_MIB_OUTSEGS,                       /* OutSegs */
        DCCP_MIB_OUTRSTS,
        DCCP_MIB_ABORTONTIMEOUT,
        DCCP_MIB_TIMEOUTS,
        DCCP_MIB_ABORTFAILED,
        DCCP_MIB_PASSIVEOPENS,
        DCCP_MIB_ATTEMPTFAILS,
        DCCP_MIB_OUTDATAGRAMS,
        DCCP_MIB_INERRS,
        DCCP_MIB_OPTMANDATORYERROR,
        DCCP_MIB_INVALIDOPT,
        __DCCP_MIB_MAX
};

#define DCCP_MIB_MAX    __DCCP_MIB_MAX
struct dccp_mib {
        unsigned long   mibs[DCCP_MIB_MAX];
};

DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
#define DCCP_INC_STATS(field)   SNMP_INC_STATS(dccp_statistics, field)
#define __DCCP_INC_STATS(field) __SNMP_INC_STATS(dccp_statistics, field)
#define DCCP_DEC_STATS(field)   SNMP_DEC_STATS(dccp_statistics, field)

/*
 * Checksumming routines
 */
static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
{
        const struct dccp_hdr* dh = dccp_hdr(skb);

        if (dh->dccph_cscov == 0)
                return skb->len;
        return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32);
}

static inline void dccp_csum_outgoing(struct sk_buff *skb)
{
        unsigned int cov = dccp_csum_coverage(skb);

        if (cov >= skb->len)
                dccp_hdr(skb)->dccph_cscov = 0;

        skb->csum = skb_checksum(skb, 0, (cov > skb->len) ? skb->len : cov, 0);
}

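/*
 * Example of the coverage arithmetic (values illustrative): with a 28-byte
 * header (dccph_doff = 7) and CsCov = 3, the checksum covers
 * (7 + 3 - 1) * 4 = 36 bytes, i.e. the whole header plus the first 8 bytes
 * of application data (RFC 4340, 9.2). CsCov = 0 means the entire packet is
 * covered, which is also what dccp_csum_outgoing() falls back to when the
 * requested coverage reaches or exceeds the packet length.
 */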

void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);

int dccp_retransmit_skb(struct sock *sk);

void dccp_send_ack(struct sock *sk);
void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                         struct request_sock *rsk);

void dccp_send_sync(struct sock *sk, const u64 seq,
                    const enum dccp_pkt_type pkt_type);

/*
 * TX Packet Dequeueing Interface
 */
void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
bool dccp_qpolicy_full(struct sock *sk);
void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
struct sk_buff *dccp_qpolicy_top(struct sock *sk);
struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);

/*
 * TX Packet Output and TX Timers
 */
void dccp_write_xmit(struct sock *sk);
void dccp_write_space(struct sock *sk);
void dccp_flush_write_queue(struct sock *sk, long *time_budget);

void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);

const char *dccp_packet_name(const int type);

void dccp_set_state(struct sock *sk, const int state);
void dccp_done(struct sock *sk);

int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
                    struct sk_buff const *skb);

int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

struct sock *dccp_create_openreq_child(const struct sock *sk,
                                       const struct request_sock *req,
                                       const struct sk_buff *skb);

int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                       struct request_sock *req,
                                       struct dst_entry *dst,
                                       struct request_sock *req_unhash,
                                       bool *own_req);
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req);

int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb);
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                           struct dccp_hdr *dh, unsigned int len);
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct dccp_hdr *dh, const unsigned int len);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk);

void dccp_close(struct sock *sk, long timeout);
struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req);

int dccp_connect(struct sock *sk);
int dccp_disconnect(struct sock *sk, int flags);
int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
int dccp_setsockopt(struct sock *sk, int level, int optname,
                    sockptr_t optval, unsigned int optlen);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
                 int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
__poll_t dccp_poll(struct file *file, struct socket *sock,
                   poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void dccp_req_err(struct sock *sk, u64 seq);

struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
void dccp_send_close(struct sock *sk, const int active);
int dccp_invalid_packet(struct sk_buff *skb);
u32 dccp_sample_rtt(struct sock *sk, long delta);

static inline bool dccp_bad_service_code(const struct sock *sk,
                                         const __be32 service)
{
        const struct dccp_sock *dp = dccp_sk(sk);

        if (dp->dccps_service == service)
                return false;
        return !dccp_list_has_service(dp->dccps_service_list, service);
}

/**
 * dccp_skb_cb - DCCP per-packet control information
 * @dccpd_type: one of %dccp_pkt_type (or unknown)
 * @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1
 * @dccpd_reset_code: one of %dccp_reset_codes
 * @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code)
 * @dccpd_opt_len: total length of all options (5.8) in the packet
 * @dccpd_seq: sequence number
 * @dccpd_ack_seq: acknowledgment number subheader field value
 *
 * This is used for transmission as well as for reception.
 */
struct dccp_skb_cb {
        union {
                struct inet_skb_parm    h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
#endif
        } header;
        __u8  dccpd_type:4;
        __u8  dccpd_ccval:4;
        __u8  dccpd_reset_code,
              dccpd_reset_data[3];
        __u16 dccpd_opt_len;
        __u64 dccpd_seq;
        __u64 dccpd_ack_seq;
};

#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))

/* RFC 4340, sec. 7.7 */
static inline int dccp_non_data_packet(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_ACK      ||
               type == DCCP_PKT_CLOSE    ||
               type == DCCP_PKT_CLOSEREQ ||
               type == DCCP_PKT_RESET    ||
               type == DCCP_PKT_SYNC     ||
               type == DCCP_PKT_SYNCACK;
}

/* RFC 4340, sec. 7.7 */
static inline int dccp_data_packet(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_DATA     ||
               type == DCCP_PKT_DATAACK  ||
               type == DCCP_PKT_REQUEST  ||
               type == DCCP_PKT_RESPONSE;
}

static inline int dccp_packet_without_ack(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
}

#define DCCP_PKT_WITHOUT_ACK_SEQ        (UINT48_MAX << 2)

static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
{
        struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
                                                           sizeof(*dh));
        dh->dccph_seq2 = 0;
        dh->dccph_seq = htons((gss >> 32) & 0xfffff);
        dhx->dccph_seq_low = htonl(gss & 0xffffffff);
}

static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
                                    const u64 gsr)
{
        dhack->dccph_reserved1 = 0;
        dhack->dccph_ack_nr_high = htons(gsr >> 32);
        dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff);
}

static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (after48(seq, dp->dccps_gsr))
                dp->dccps_gsr = seq;
        /* Sequence validity window depends on remote Sequence Window (7.5.1) */
        dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
        /*
         * Adjust SWL so that it is not below ISR. In contrast to RFC 4340,
         * 7.5.1 we perform this check beyond the initial handshake: W/W' are
         * always > 32, so for the first W/W' packets in the lifetime of a
         * connection we always have to adjust SWL.
         * A second reason why we are doing this is that the window depends on
         * the feature-remote value of Sequence Window: nothing stops the peer
         * from updating this value while we are busy adjusting SWL for the
         * first W packets (we would have to count from scratch again then).
         * Therefore it is safer to always make sure that the Sequence Window
         * is not artificially extended by a peer who grows SWL downwards by
         * continually updating the feature-remote Sequence-Window.
         * If sequence numbers wrap it is bad luck. But that will take a while
         * (48 bit), and this measure prevents Sequence-number attacks.
         */
        if (before48(dp->dccps_swl, dp->dccps_isr))
                dp->dccps_swl = dp->dccps_isr;
        dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4);
}

static inline void dccp_update_gss(struct sock *sk, u64 seq)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_gss = seq;
        /* Ack validity window depends on local Sequence Window value (7.5.1) */
        dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win);
        /* Adjust AWL so that it is not below ISS - see comment above for SWL */
        if (before48(dp->dccps_awl, dp->dccps_iss))
                dp->dccps_awl = dp->dccps_iss;
        dp->dccps_awh = dp->dccps_gss;
}

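/*
 * Numerical illustration of the window bounds above (values arbitrary):
 * with GSR = 1000 and a remote Sequence Window of W = 100, the valid range
 * for received sequence numbers is
 *      SWL = GSR + 1 - W/4 = 976,
 *      SWH = GSR + 3*W/4   = 1075,
 * i.e. roughly one quarter of the window trails GSR and three quarters lie
 * ahead of it, as prescribed by RFC 4340, 7.5.1. The AWL/AWH pair in
 * dccp_update_gss() is computed similarly from GSS and the local Sequence
 * Window W' (AWL = GSS + 1 - W', AWH = GSS).
 */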

static inline int dccp_ackvec_pending(const struct sock *sk)
{
        return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
               !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
}

static inline int dccp_ack_pending(const struct sock *sk)
{
        return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}

int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
int dccp_feat_finalise_settings(struct dccp_sock *dp);
int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
                          struct sk_buff *skb);
int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
void dccp_feat_list_purge(struct list_head *fn_list);

int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
u32 dccp_timestamp(void);
void dccp_timestamping_init(void);
int dccp_insert_option(struct sk_buff *skb, unsigned char option,
                       const void *value, unsigned char len);

#ifdef CONFIG_SYSCTL
int dccp_sysctl_init(void);
void dccp_sysctl_exit(void);
#else
static inline int dccp_sysctl_init(void)
{
        return 0;
}

static inline void dccp_sysctl_exit(void)
{
}
#endif

#endif /* _DCCP_H */