/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(); this explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
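
/*
 * Example (illustrative sketch, not part of this header): walking the
 * primary hash slot for a local port under its lock.  "net" and "port"
 * are assumed to be supplied by the caller, and udp_sk() requires
 * <linux/udp.h>; the real bind/lookup logic lives in net/ipv4/udp.c.
 *
 *	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, port);
 *	struct sock *sk;
 *
 *	spin_lock_bh(&hslot->lock);
 *	sk_for_each(sk, &hslot->head) {
 *		if (net_eq(sock_net(sk), net) &&
 *		    udp_sk(sk)->udp_port_hash == port)
 *			break;
 *	}
 *	spin_unlock_bh(&hslot->lock);
 *
 * On exit, "sk" points at a socket bound to "port" in "net", or is NULL
 * if the slot held no match.
 */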

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 * Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}

/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, but the caller has
			 * indicated an Ethernet packet, so hash its MAC
			 * addresses instead.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet; fall
			 * back to a consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility of leaking useful information to an
	 * attacker.  Only the upper 16 bits are relevant when computing a
	 * 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
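
/*
 * Worked example for the mapping above (illustrative assumptions: the
 * obfuscated hash is 0x80000000 and the port range is [32768, 61000)):
 *
 *	(((u64)0x80000000 * (61000 - 32768)) >> 32) + 32768
 *	  = (2^31 * 28232) / 2^32 + 32768
 *	  = 14116 + 32768
 *	  = 46884
 *
 * The multiply-shift scales the 32-bit hash linearly into the range
 * width without a division, so the result always lands in [min, max).
 */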

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
void udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}
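
/*
 * Example (illustrative sketch, not part of this header): how the
 * scratch-area helpers above combine on the recvmsg() fast path so the
 * dequeue code avoids touching cold sk_buff cache lines.  "msg", "off"
 * and "copied" are assumed caller state; the real, more complete logic
 * (including the checksum-validation branch elided here) lives in
 * udp_recvmsg() in net/ipv4/udp.c.
 *
 *	int ulen = udp_skb_len(skb);
 *	int err;
 *
 *	if (copied > ulen - off)
 *		copied = ulen - off;
 *	if (udp_skb_csum_unnecessary(skb)) {
 *		if (udp_skb_is_linear(skb))
 *			err = copy_linear_skb(skb, copied, off,
 *					      &msg->msg_iter);
 *		else
 *			err = skb_copy_datagram_msg(skb, off, msg, copied);
 *	}
 */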

/*
 * SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_INC_STATS(sk, field)					\
do {									\
	if ((sk)->sk_family == AF_INET)					\
		__UDP_INC_STATS(sock_net(sk), field, 0);		\
	else								\
		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
} while (0)
#else
#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
#endif
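
/*
 * Example (illustrative, not part of this header): bumping a received-
 * datagram counter from the protocol handlers.  UDP and UDP-Lite share
 * code, so the boolean picks the right MIB block; the __-prefixed
 * variants assume BH/softirq context, as in the receive path.
 *
 *	int is_udplite = IS_UDPLITE(sk);
 *
 *	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite);
 *
 * or, family-agnostic (selects v4 vs v6 counters from sk->sk_family):
 *
 *	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 *
 * Note that __UDPX_INC_STATS() always passes is_udplite == 0, i.e. it is
 * for plain UDP only.
 */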

/* /proc */
int udp_seq_open(struct inode *inode, struct file *file);

struct udp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	struct udp_table		*udp_table;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

struct udp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	int			bucket;
	struct udp_table	*udp_table;
};

#ifdef CONFIG_PROC_FS
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
void udpv6_encap_enable(void);
#endif

#endif	/* _UDP_H */