/*
 * INET An implementation of the TCP/IP protocol suite for the LINUX
 * operating system. INET is implemented using the BSD Socket
 * interface as the means of communication with the user level.
 *
 * Definitions for the AF_INET socket handler.
 *
 * Version: @(#)sock.h 1.0.4 05/13/93
 *
 * Authors: Ross Biro
 *	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *	Corey Minyard <wf-rch!minyard@relay.EU.net>
 *	Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *	Alan Cox	:	Volatiles in skbuff pointers. See
 *				skbuff comments. May be overdone,
 *				better to prove they can be removed
 *				than the reverse.
 *	Alan Cox	:	Added a zapped field for tcp to note
 *				a socket is reset and must stay shut up
 *	Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *	Alan Cox	:	Eliminate low level recv/recvfrom
 *	David S. Miller	:	New socket lookup architecture.
 *	Steve Whitehouse:	Default routines for sock_ops
 *	Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *				protinfo be just a void pointer, as the
 *				protocol specific parts were moved to
 *				respective headers and ipv4/v6, etc now
 *				use private slabcaches for its socks
 *	Pedro Hortas	:	New flags field for socket options
 *
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

/* This is the per-socket lock. The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
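/*
 * Illustrative sketch (not part of the original header): how the two halves
 * of the per-socket lock described above are typically used. A hypothetical
 * protocol takes the mutex-like half via lock_sock() from process context,
 * while its softirq paths take the spinlock half via bh_lock_sock(); the
 * helpers used here are declared further down in this file.
 */
#if 0	/* example only */
static void example_process_context(struct sock *sk)
{
	lock_sock(sk);		/* sleeping, mutex-like half; may process backlog on release */
	/* ... modify socket state ... */
	release_sock(sk);
}

static void example_softirq_context(struct sock *sk)
{
	bh_lock_sock(sk);	/* spinlock half, for softirq/BH context */
	/* ... touch state shared with the softirq path ... */
	bh_unlock_sock(sk);
}
#endif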
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_daddr: Foreign IPv4 addr
 * @skc_rcv_saddr: Bound local IPv4 addr
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_u16hashes: two u16 hash values used by UDP lookup tables
 * @skc_dport: placeholder for inet_dport/tw_dport
 * @skc_num: placeholder for inet_num/tw_num
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_reuseport: %SO_REUSEPORT setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 * @skc_tx_queue_mapping: tx queue number for this connection
 * @skc_flags: place holder for sk_flags
 *	%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *	%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 * @skc_incoming_cpu: record/match cpu processing incoming packets
 * @skc_refcnt: reference count
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long			skc_flags;
		struct sock			*skc_listener;	/* request_sock */
		struct inet_timewait_death_row	*skc_tw_dr;	/* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt;	/* struct tcp_timewait_sock */
	};

	atomic_t		skc_refcnt;
	/* private: */
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt;	/* struct tcp_timewait_sock */
	};
	/* public: */
};

/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_wq: sock wait queue and async head
 * @sk_rx_dst: receive input route used by early demux
 * @sk_dst_cache: destination cache
 * @sk_policy: flow policy
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_napi_id: id of the last napi context to receive data for sk
 * @sk_ll_usec: usecs to busypoll when there is no data
 * @sk_allocation: allocation mode
 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_padding: unused element for alignment
 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
 * @sk_no_check_rx: allow zero checksum in RX packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_gso_max_size: Maximum GSO segment size to build
 * @sk_gso_max_segs: Maximum number of GSO segments
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *	IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a
 *	persistent failure not just 'timed out'
 * @sk_drops: raw/udp drops counter
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs to in this network family
 * @sk_peer_pid: &struct pid for this socket's peer
 * @sk_peer_cred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_txhash: computed flow hash for use on transmit
 * @sk_filter: socket filtering instructions
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_tsflags: SO_TIMESTAMPING socket options
 * @sk_tskey: counter to disambiguate concurrent tstamp requests
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_frag: cached page frag
 * @sk_peek_off: current peek_offset value
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_mark: generic packet mark
 * @sk_cgrp_data: cgroup data for this cgroup
 * @sk_memcg: this socket's memory cgroup association
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer sending space available
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 * @sk_reuseport_cb: reuseport group container
 * @sk_rcu: used during RCU grace period
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it is logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	atomic_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	struct sk_buff		*sk_send_head;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	u32			sk_pacing_rate; /* bytes per second */
	u32			sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT	16
#define SK_FL_PROTO_MASK	0x00ff0000

#define SK_FL_TYPE_SHIFT	0
#define SK_FL_TYPE_MASK		0x0000ffff
#else
#define SK_FL_PROTO_SHIFT	8
#define SK_FL_PROTO_MASK	0x0000ff00

#define SK_FL_TYPE_SHIFT	16
#define SK_FL_TYPE_MASK		0xffff0000
#endif

	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_padding : 2,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol : 8,
				sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
	kmemcheck_bitfield_end(flags);

	u16			sk_gso_max_segs;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
	struct rcu_head		sk_rcu;
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not it is
 * OK for its port to be reused by someone else. SK_FORCE_REUSE on a socket
 * means that the socket will reuse everybody else's port without looking
 * at the other's sk_reuse value.
 */
#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		s32 off = READ_ONCE(sk->sk_peek_off);
		if (off >= 0)
			return off;
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}
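/*
 * Illustrative sketch (not part of the original header): the pattern the
 * comment above describes. A hypothetical lookup walks a hash chain while
 * the chain lock pins the entries, takes an extra reference with
 * sock_hold(), and the caller later drops it with sock_put() (defined
 * further down in this file). All names here are made up.
 */
#if 0	/* example only */
static struct sock *example_lookup(struct hlist_head *chain, __be16 dport)
{
	struct sock *sk;

	/* chain lock held by the caller, preventing concurrent unhashing */
	sk_for_each(sk, chain) {
		if (sk->sk_dport == dport) {
			sock_hold(sk);	/* safe: sk is already "grabbed" by the hash table */
			return sk;	/* caller must sock_put() when done */
		}
	}
	return NULL;
}
#endif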
/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
	else
		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
				    struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @offset: offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference((head)->first);			       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});      \
	     pos = rcu_dereference(pos->next))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
extern struct static_key memalloc_socks;
static inline int sk_memalloc_socks(void)
{
	return static_key_false(&memalloc_socks);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* don't leave the skb dst without a refcount, we are going to leave the rcu lock */
	skb_dst_force_safe(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	sk->sk_incoming_cpu = raw_smp_processor_id();
}
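/*
 * Illustrative sketch (not part of the original header): how a protocol's
 * softirq receive path typically combines the backlog helpers above with
 * the per-socket spinlock. The function name and the chosen limit are made
 * up; real protocols pick their own backlog limit.
 */
#if 0	/* example only */
static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* socket not owned by a process: process the skb directly */
		rc = sk_backlog_rcv(sk, skb);
	} else {
		/* the lock owner will run sk_backlog_rcv() on release */
		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
			rc = -ENOBUFS;	/* queues full, caller frees the skb */
	}
	bh_unlock_sock(sk);
	return rc;
}
#endif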
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_key_false(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
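/*
 * Illustrative sketch (not part of the original header): the usual shape of
 * a wait loop built on sk_wait_event(), modelled loosely on what helpers
 * like sk_stream_wait_connect() do. The condition and function name are
 * made up; sk_sleep() is defined further down in this file.
 */
#if 0	/* example only */
static int example_wait_for_condition(struct sock *sk, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int done;

	add_wait_queue(sk_sleep(sk), &wait);
	/* sk_wait_event() drops and re-takes the socket lock around sleeping */
	done = sk_wait_event(sk, timeo_p, !sk->sk_err, &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	return done ? 0 : -EAGAIN;
}
#endif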
struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/*
 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk);
	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
};

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
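/*
 * Illustrative sketch (not part of the original header): a minimal,
 * hypothetical protocol block and its registration with proto_register().
 * Only a few of the struct proto fields above are shown and every name
 * here is made up.
 */
#if 0	/* example only */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
	.close		= example_close,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
	.backlog_rcv	= example_backlog_rcv,
	.hash		= example_hash,
	.unhash		= example_unhash,
	.get_port	= example_get_port,
};

static int __init example_proto_init(void)
{
	/* second argument asks proto_register() to create a slab cache */
	return proto_register(&example_proto, 1);
}
#endif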
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk) : true;
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       sk_stream_memory_free(sk);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
	int *memory_pressure = sk->sk_prot->memory_pressure;

	if (!memory_pressure)
		return;

	if (*memory_pressure)
		*memory_pressure = 0;
}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	sk->sk_prot->enter_memory_pressure(sk);
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

/* We used to have PAGE_SIZE here, but systems with 64KB pages
 * do not necessarily have 16x more memory than 4KB ones.
 */
#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline bool sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
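/*
 * Illustrative sketch (not part of the original header): how the accounting
 * helpers above are typically combined on a transmit path, i.e. schedule
 * forward allocation first and charge it once the skb is actually queued.
 * The function and the queueing step are hypothetical.
 */
#if 0	/* example only */
static int example_queue_for_tx(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOBUFS;	/* protocol/global memory limits hit */

	sk_mem_charge(sk, skb->truesize);	/* consume forward allocation */
	sk->sk_wmem_queued += skb->truesize;
	skb_queue_tail(&sk->sk_write_queue, skb);
	return 0;
}
#endif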
static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
	 * is not called and more than 2 GBytes are released at once.
	 *
	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
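/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of lock_sock_fast() and unlock_sock_fast(). The work done while
 * the lock is held is hypothetical; the point is to hand the returned
 * "slow" flag back unchanged.
 */
#if 0	/* example only */
static void example_fast_locked_update(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);	/* true if we fell back to lock_sock() */

	/* ... short, non-sleeping update of socket state ... */

	unlock_sock_fast(sk, slow);
}
#endif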
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */

static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
#define sock_edemux(skb) sock_efree(skb)
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    char __user *optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u32 mark;
	u16 tsflags;
};

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
unsigned int sock_no_poll(struct file *, struct socket *,
			  struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, int __user *optlen);
int compat_sock_common_setsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

/*
 * Default socket callbacks and setup code
 */

/* Initialise core socket variables */
void sock_init_data(struct socket *sock, struct sock *sk);

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhashing is made by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so that they are prone too.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
 */
void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_wq = parent->wq;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}
static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	/*
	 * This can be called while sk is owned by the caller only,
	 * with no state that can be checked in a rcu_dereference_check() cond
	 */
	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline bool sk_check_csum_caps(struct sock *sk)
{
	return (sk->sk_route_caps & NETIF_F_HW_CSUM) ||
	       (sk->sk_family == PF_INET &&
		(sk->sk_route_caps & NETIF_F_IP_CSUM)) ||
	       (sk->sk_family == PF_INET6 &&
		(sk->sk_route_caps & NETIF_F_IPV6_CSUM));
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * skwq_has_sleeper - check if there are any waiting processes
 * @wq: struct socket_wq
 *
 * Returns true if socket_wq has waiting processes
 *
 * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to a race found within the tcp code.
 *
 * Consider the following tcp code paths:
 *
 * CPU1                             CPU2
 *
 * sys_select                       receive packet
 *   ...                            ...
 *   __add_wait_queue               update tp->rcv_nxt
 *   ...                            ...
 *   tp->rcv_nxt check              sock_def_readable
 *   ...                            {
 *   schedule                         rcu_read_lock();
 *                                    wq = rcu_dereference(sk->sk_wq);
 *                                    if (wq && waitqueue_active(&wq->wait))
 *                                            wake_up_interruptible(&wq->wait)
 *                                    ...
 *                                  }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
 * could then end up calling schedule and sleeping forever if there is no more
 * data on the socket.
 */
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}
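
/* Illustrative sketch (not part of this header): the wake-up side of the
 * pairing described above. A data-ready callback, modelled loosely on
 * sock_def_readable(), checks skwq_has_sleeper() only after the queued data
 * is visible; the matching barrier on the sleeper side is issued by
 * sock_poll_wait() below. my_data_ready() is a hypothetical callback.
 *
 *	static void my_data_ready(struct sock *sk)
 *	{
 *		struct socket_wq *wq;
 *
 *		rcu_read_lock();
 *		wq = rcu_dereference(sk->sk_wq);
 *		if (skwq_has_sleeper(wq))	// includes the paired smp_mb()
 *			wake_up_interruptible_sync_poll(&wq->wait,
 *					POLLIN | POLLRDNORM | POLLRDBAND);
 *		rcu_read_unlock();
 *	}
 */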

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:		file
 * @wait_address:	socket wait queue
 * @p:			poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
				  wait_queue_head_t *wait_address, poll_table *p)
{
	if (!poll_does_not_wait(p) && wait_address) {
		poll_wait(filp, wait_address, p);
		/* We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired with the one in wq_has_sleeper.
		 */
		smp_mb();
	}
}

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

/*
 * Queue a received datagram if it will fit. Stream and sequenced
 * protocols can't normally use this as they need to fit buffers in
 * and play with them.
 *
 * Inlined as it's very short and called for pretty much every
 * packet ever received.
 */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
			unsigned int flags);
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

/*
 * Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Note:
 * We use sk->sk_wq_raw, from contexts knowing this
 * pointer is not NULL and cannot disappear/change.
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}
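
/* Illustrative sketch (not part of this header): a minimal datagram receive
 * path, loosely modelled on __sock_queue_rcv_skb(), showing how
 * skb_set_owner_r() charges the skb's truesize to sk_rmem_alloc before the
 * skb is queued, and where a drop would be accounted. my_queue_rcv_skb() is
 * a hypothetical, simplified helper (the real one also runs sk_filter() and
 * tags the skb).
 *
 *	static int my_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 *		    (unsigned int)sk->sk_rcvbuf) {
 *			atomic_inc(&sk->sk_drops);
 *			return -ENOMEM;
 *		}
 *		skb_set_owner_r(skb, sk);	// charge rmem, set destructor
 *		skb_queue_tail(&sk->sk_receive_queue, skb);
 *		sk->sk_data_ready(sk);
 *		return 0;
 *	}
 */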

/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
 * need sizeof(sk_buff) + MTU + padding, unless the net driver performs
 * copybreak.
 * Note: for send buffers, TCP works better if we can build two skbs at
 * minimum.
 */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);

/**
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
 * If socket allocation mode allows current thread to sleep, it means it is
 * safe to use the per-task page_frag instead of the per-socket one.
 */
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_allow_blocking(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);

/*
 * Default write policy as shown to user space via poll/select/SIGIO
 */
static inline bool sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ?: 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

/* Store sock_skb_cb at the end of skb->cb[] so protocol families
 * using skb->cb[] would keep using it directly and utilize its
 * alignment guarantee.
 */
#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
			    sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
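
/* Illustrative sketch (not part of this header): a protocol family that
 * keeps its own per-skb state in skb->cb[] can use sock_skb_cb_check_size()
 * to assert at build time that its control block leaves room for struct
 * sock_skb_cb at the tail of cb[], so the dropcount written by
 * sock_skb_set_dropcount() below is not clobbered. struct myproto_skb_cb,
 * MYPROTO_SKB_CB() and myproto_init() are hypothetical.
 *
 *	struct myproto_skb_cb {
 *		u32	seq;
 *		u16	flags;
 *	};
 *	#define MYPROTO_SKB_CB(__skb) ((struct myproto_skb_cb *)&((__skb)->cb[0]))
 *
 *	static int __init myproto_init(void)
 *	{
 *		sock_skb_cb_check_size(sizeof(struct myproto_skb_cb));
 *		return 0;
 *	}
 */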

static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested
	 * - software time stamp available and wanted
	 * - hardware time stamps available and wanted
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)	| \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE	| \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else
		sk->sk_stamp = skb->tstamp;
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tsflags:	timestamping flags to use
 * @tx_flags:	completed with instructions for time stamping
 *
 * Note: callers should take care of initial *tx_flags value (usually 0)
 */
static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	if (unlikely(tsflags))
		__sock_tx_timestamp(tsflags, tx_flags);
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}
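
/* Illustrative sketch (not part of this header): a sendmsg path typically
 * starts from the socket's default timestamping flags (possibly overridden
 * by a SO_TIMESTAMPING cmsg, not shown), lets sock_tx_timestamp() translate
 * them into skb tx_flags, and applies the result to the skb's shared info.
 * The surrounding sendmsg context providing sk and skb is assumed.
 *
 *	__u8 tx_flags = 0;
 *
 *	sock_tx_timestamp(sk, sk->sk_tsflags, &tx_flags);
 *	skb_shinfo(skb)->tx_flags |= tx_flags;
 */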

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

/* This helper checks if a socket is a full socket,
 * i.e. _not_ a timewait or request socket.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV one.
 * SYNACK messages can be attached to either of them (depending on SYNCOOKIE).
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

/**
 * sk_state_load - read sk->sk_state for lockless contexts
 * @sk: socket pointer
 *
 * Paired with sk_state_store(). Used in places we do not hold socket lock:
 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
 */
static inline int sk_state_load(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_state);
}

/**
 * sk_state_store - update sk->sk_state
 * @sk: socket pointer
 * @newstate: new state
 *
 * Paired with sk_state_load(). Should be used in contexts where
 * state change might impact lockless readers.
 */
static inline void sk_state_store(struct sock *sk, int newstate)
{
	smp_store_release(&sk->sk_state, newstate);
}

void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */