/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
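/*
 * Example (illustrative sketch, not part of this header): SOCK_DEBUG()
 * compiles to nothing unless SOCK_DEBUGGING is defined, and prints only
 * for sockets that have SOCK_DBG set (the SO_DEBUG socket option).
 * example_trace() is a hypothetical caller.
 */
#if 0	/* usage sketch only, not compiled */
static void example_trace(struct sock *sk, struct sk_buff *skb)
{
	SOCK_DEBUG(sk, "example: rx %u bytes\n", skb->len);
}
#endif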
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_dport: placeholder for inet_dport/tw_dport
 *	@skc_num: placeholder for inet_num/tw_num
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_reuseport: %SO_REUSEPORT setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_flags: placeholder for sk_flags
 *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@skc_incoming_cpu: record/match cpu processing incoming packets
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on an 8-byte aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;
	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kinds of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
	};

	refcount_t		skc_refcnt;
	/* private: */
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_kern_sock: True if sock is using kernel lock classes
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_wq: sock wait queue and async head
 *	@sk_rx_dst: receive input route used by early demux
 *	@sk_dst_cache: destination cache
 *	@sk_dst_pending_confirm: need to confirm neighbour
 *	@sk_policy: flow policy
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_tsq_flags: TCP Small Queues flags
 *	@sk_write_queue: Packet sending queue
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_napi_id: id of the last napi context to receive data for sk
 *	@sk_ll_usec: usecs to busypoll when there is no data
 *	@sk_allocation: allocation mode
 *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
 *	@sk_pacing_status: Pacing status (requested, handled by sch_fq)
 *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
 *	@sk_sndbuf: size of send buffer in bytes
 *	@__sk_flags_offset: empty field used to determine location of bitfield
 *	@sk_padding: unused element for alignment
 *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
 *	@sk_no_check_rx: allow zero checksum in RX packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_route_nocaps: forbidden route capabilities (e.g. NETIF_F_GSO_MASK)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_gso_max_size: Maximum GSO segment size to build
 *	@sk_gso_max_segs: Maximum number of GSO segments
 *	@sk_pacing_shift: scaling factor for TCP Small Queues
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *			  IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a
 *		      persistent failure not just 'timed out'
 *	@sk_drops: raw/udp drops counter
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_uid: user id of owner
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peer_pid: &struct pid for this socket's peer
 *	@sk_peer_cred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_txhash: computed flow hash for use on transmit
 *	@sk_filter: socket filtering instructions
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_tsflags: SO_TIMESTAMPING socket options
 *	@sk_tskey: counter to disambiguate concurrent tstamp requests
 *	@sk_zckey: counter to order MSG_ZEROCOPY notifications
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_frag: cached page frag
 *	@sk_peek_off: current peek_offset value
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_cgrp_data: cgroup data for this cgroup
 *	@sk_memcg: this socket's memory cgroup association
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 *	@sk_reuseport_cb: reuseport group container
 *	@sk_rcu: used during RCU grace period
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special-case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it is logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	u32			sk_pacing_rate; /* bytes per second */
	u32			sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT	16
#define SK_FL_PROTO_MASK	0x00ff0000

#define SK_FL_TYPE_SHIFT	0
#define SK_FL_TYPE_MASK		0x0000ffff
#else
#define SK_FL_PROTO_SHIFT	8
#define SK_FL_PROTO_MASK	0x0000ff00

#define SK_FL_TYPE_SHIFT	16
#define SK_FL_TYPE_MASK		0xffff0000
#endif

	unsigned int		sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol : 8,
				sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
	u16			sk_gso_max_segs;
	u8			sk_pacing_shift;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;
	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
	struct rcu_head		sk_rcu;
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)
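/*
 * Example (illustrative sketch, not part of this header): sk_user_data is
 * private to the socket's creator (e.g. the RPC layer); RCU readers go
 * through the wrappers above. struct example_ctx and example_get_ctx()
 * are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static struct example_ctx *example_get_ctx(struct sock *sk)
{
	/* caller holds rcu_read_lock() */
	return rcu_dereference_sk_user_data(sk);
}
#endif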
/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean whether or not it is OK
 * for its port to be reused by someone else. SK_FORCE_REUSE on a socket
 * means that the socket will reuse everybody else's port without looking
 * at the other socket's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
 * when sk is ALREADY grabbed, f.e. it is found in a hash table
 * or a list and the lookup is made under a lock preventing hash table
 * modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}
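/*
 * Example (illustrative sketch, not part of this header): a reference
 * taken with sock_hold() during a locked lookup is later dropped with
 * sock_put() (defined further down in this file), which frees the socket
 * on the last reference. example_lookup_use() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static void example_lookup_use(struct sock *sk)
{
	sock_hold(sk);	/* valid only because sk was found under the lock */
	/* ... use sk after dropping the bucket lock ... */
	sock_put(sk);	/* may free sk */
}
#endif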
/* Ungrab socket in the context, which assumes that socket refcnt
 * cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
				    struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)
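/*
 * Example (illustrative sketch, not part of this header): walking one
 * hash bucket under its lock with sk_for_each() to find a matching
 * socket. example_find() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static struct sock *example_find(struct hlist_head *bucket, __be16 dport)
{
	struct sock *sk;

	sk_for_each(sk, bucket)
		if (sk->sk_dport == dport)
			return sk;
	return NULL;
}
#endif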
/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @offset:	offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful: only use this in a context where these parameters
	 * cannot change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

#ifdef CONFIG_NET
extern struct static_key memalloc_socks;
static inline int sk_memalloc_socks(void)
{
	return static_key_false(&memalloc_socks);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
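/*
 * Example (illustrative sketch, not part of this header): testing socket
 * flags with sock_flag(), here checking SO_LINGER state.
 * example_lingering() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static bool example_lingering(const struct sock *sk)
{
	return sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime;
}
#endif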
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* Don't leave the skb's dst unrefcounted; we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account the size of the receive queue and backlog queue.
 * Do not take into account this skb's truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(sk->sk_incoming_cpu != cpu))
		sk->sk_incoming_cpu = cpu;
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}
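/*
 * Example (illustrative sketch, not part of this header): the canonical
 * receive pattern used by protocols such as TCP: deliver directly when no
 * user context owns the socket, otherwise queue to the backlog.
 * example_do_rcv() is hypothetical; on error the caller frees the skb.
 */
#if 0	/* usage sketch only, not compiled */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);		/* softirq side of the socket lock */
	if (!sock_owned_by_user(sk))
		rc = sk_backlog_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
		rc = -ENOBUFS;		/* receive queues are full: drop */
	bh_unlock_sock(sk);
	return rc;
}
#endif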
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_key_false(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

/*
 * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}
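/*
 * Example (illustrative sketch, not part of this header): sk_wait_event()
 * releases the socket lock around the sleep and re-checks the condition
 * after retaking it, so a typical wait loop looks like the hypothetical
 * example_wait_for_data() below (sk_sleep() is defined further down in
 * this file).
 */
#if 0	/* usage sketch only, not compiled */
static bool example_wait_for_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	bool rc;

	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   !skb_queue_empty(&sk->sk_receive_queue), &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
#endif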
/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
	void			(*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
						unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void			(*release_cb)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	int			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	bool			(*stream_memory_free)(const struct sock *sk);
	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	void			(*leave_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	unsigned long		*memory_pressure;
	long			*sysctl_mem;

	int			*sysctl_wmem;
	int			*sysctl_rmem;
	u32			sysctl_wmem_offset;
	u32			sysctl_rmem_offset;

	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	slab_flags_t		slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
		struct smc_hashinfo	*smc_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	int			(*diag_destroy)(struct sock *sk, int err);
} __randomize_layout;

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (refcount_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
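/*
 * Example (illustrative sketch, not part of this header): a minimal
 * protocol block fills in only the handlers it supports and registers it
 * with proto_register(); passing alloc_slab == 1 also creates a kmem
 * cache of obj_size bytes for its sockets. All example_* handlers are
 * hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* or a larger protocol sock */
	.close		= example_close,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
};

/* in module init: proto_register(&example_proto, 1); */
#endif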
static inline bool sk_stream_memory_free(const struct sock *sk)
{
	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
		return false;

	return sk->sk_prot->stream_memory_free ?
		sk->sk_prot->stream_memory_free(sk) : true;
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
	       sk_stream_memory_free(sk);
}

static inline int sk_under_cgroup_hierarchy(struct sock *sk,
					    struct cgroup *ancestor)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
				    ancestor);
#else
	return -ENOTSUPP;
#endif
}

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!*sk->sk_prot->memory_pressure;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	return atomic_long_read(sk->sk_prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt)
{
	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	percpu_counter_dec(sk->sk_prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	percpu_counter_inc(sk->sk_prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
				       int inc)
{
}
#endif


/* With per-bucket locks this operation is not-atomic, so that
 * this version is not worse.
 */
static inline int __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	return sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);

/* We used to have PAGE_SIZE here, but systems with 64KB pages
 * do not necessarily have 16x more memory than 4KB ones.
 */
#define SK_MEM_QUANTUM 4096
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long val = sk->sk_prot->sysctl_mem[index];

#if PAGE_SIZE > SK_MEM_QUANTUM
	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
#elif PAGE_SIZE < SK_MEM_QUANTUM
	val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
#endif
	return val;
}

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline bool sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
	if (!sk_has_account(sk))
		return true;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
		skb_pfmemalloc(skb);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
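/*
 * Example (illustrative sketch, not part of this header): a transmit path
 * first reserves forward allocation, then charges the skb to the socket
 * (sk_mem_charge() is defined just below). example_queue_skb() is
 * hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOBUFS;	/* over the protocol memory limits */
	sk_mem_charge(sk, skb->truesize);
	sk->sk_wmem_queued += skb->truesize;
	return 0;
}
#endif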
static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
	 * is not called and more than 2 GBytes are released at once.
	 *
	 * If we reach 2 MBytes, reclaim 1 MByte right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

static inline void sock_release_ownership(struct sock *sk)
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
				(skey), (sname));			\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

#ifdef CONFIG_LOCKDEP
static inline bool lockdep_sock_is_held(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}
#endif

void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
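/*
 * Example (illustrative sketch, not part of this header): a process-context
 * path takes the owner lock with lock_sock(), which may sleep; packets
 * arriving in softirq context meanwhile go to the backlog and are drained
 * when release_sock() runs. example_set_rcvlowat() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static void example_set_rcvlowat(struct sock *sk, int val)
{
	lock_sock(sk);			/* mutex-like; may sleep */
	sk->sk_rcvlowat = val ? : 1;	/* socket state is stable here */
	release_sock(sk);		/* also processes the backlog queue */
}
#endif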
static inline void sock_owned_by_me(const struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif
}

static inline bool sock_owned_by_user(const struct sock *sk)
{
	sock_owned_by_me(sk);
	return sk->sk_lock.owned;
}

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

/* no reclassification while locks are held */
static inline bool sock_allow_reclassification(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif

int sock_setsockopt(struct socket *sock, int level, int op,
		    char __user *optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
		    char __user *optval, int __user *optlen);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
	u32 mark;
	u16 tsflags;
};

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc);
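/*
 * Example (illustrative sketch, not part of this header): sendmsg()
 * implementations seed a sockcm_cookie from socket defaults, then let
 * SOL_SOCKET control messages override it via sock_cmsg_send().
 * example_init_cookie() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_init_cookie(struct sock *sk, struct msghdr *msg,
			       struct sockcm_cookie *sockc)
{
	sockc->mark = sk->sk_mark;
	sockc->tsflags = sk->sk_tsflags;
	if (msg->msg_controllen)
		return sock_cmsg_send(sk, msg, sockc);
	return 0;
}
#endif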
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
unsigned int sock_no_poll(struct file *, struct socket *,
			  struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
		 struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, int __user *optlen);
int compat_sock_common_setsockopt(struct socket *sock, int level,
				  int optname, char __user *optval, unsigned int optlen);

void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
void sock_init_data(struct socket *sock, struct sock *sk);

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count,
 *   while they sit in a queue. Otherwise, packets will leak into a hole, when
 *   the socket is looked up by one CPU and unhashed by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are prone to it too.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (refcount_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
 */
void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
		     unsigned int trim_cap, bool refcounted);
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
				 const int nested)
{
	return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	WARN_ON(parent->sk);
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_wq = parent->wq;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline u32 net_tx_rndhash(void)
{
	u32 v = prandom_u32();

	return v ?: 1;
}

static inline void sk_set_txhash(struct sock *sk)
{
	sk->sk_txhash = net_tx_rndhash();
}

static inline void sk_rethink_txhash(struct sock *sk)
{
	if (sk->sk_txhash)
		sk_set_txhash(sk);
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache,
				     lockdep_sock_is_held(sk));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	sk_rethink_txhash(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_tx_queue_clear(sk);
			sk->sk_dst_pending_confirm = 0;
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
					    lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	sk->sk_dst_pending_confirm = 0;
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	sk_dst_set(sk, NULL);
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!sk->sk_dst_pending_confirm)
		sk->sk_dst_pending_confirm = 1;
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
		if (sk && sk->sk_dst_pending_confirm)
			sk->sk_dst_pending_confirm = 0;
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}
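/*
 * Example (illustrative sketch, not part of this header): sk_dst_get()
 * returns a referenced dst (or NULL when the cached route has gone), so
 * callers pair it with dst_release(). example_output() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static void example_output(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst) {
		/* ... transmit along the cached route ... */
		dst_release(dst);
	}
}
#endif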
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_dst_confirm(struct sock *sk)
{
	if (!sk->sk_dst_pending_confirm)
		sk->sk_dst_pending_confirm = 1;
}

static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
	if (skb_get_dst_pending_confirm(skb)) {
		struct sock *sk = skb->sk;
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
		if (sk && sk->sk_dst_pending_confirm)
			sk->sk_dst_pending_confirm = 0;
	}
}

bool sk_mc_loop(struct sock *sk);

static inline bool sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline bool sk_check_csum_caps(struct sock *sk)
{
	return (sk->sk_route_caps & NETIF_F_HW_CSUM) ||
	       (sk->sk_family == PF_INET &&
		(sk->sk_route_caps & NETIF_F_IP_CSUM)) ||
	       (sk->sk_family == PF_INET6 &&
		(sk->sk_route_caps & NETIF_F_IPV6_CSUM));
}

static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
					   struct iov_iter *from, char *to,
					   int copy, int offset)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;

		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, offset);
	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
		if (!copy_from_iter_full_nocache(to, copy, from))
			return -EFAULT;
	} else if (!copy_from_iter_full(to, copy, from))
		return -EFAULT;

	return 0;
}

static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
				       struct iov_iter *from, int copy)
{
	int err, offset = skb->len;

	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
				       copy, offset);
	if (err)
		__skb_trim(skb, offset);

	return err;
}

static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
					   struct sk_buff *skb,
					   struct page *page,
					   int off, int copy)
{
	int err;

	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
				       copy, skb->len);
	if (err)
		return err;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}
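/* Example (illustrative sketch, not part of this header): a typical
 * sendmsg() fast path appends user data with skb_add_data_nocache();
 * on failure the skb has already been trimmed back, so the caller
 * only propagates the error (the label below is hypothetical):
 *
 *	err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
 *	if (err)
 *		goto do_error;
 */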
/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline bool sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * skwq_has_sleeper - check if there are any waiting processes
 * @wq: struct socket_wq
 *
 * Returns true if socket_wq has waiting processes
 *
 * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider the following tcp code paths::
 *
 *   CPU1                     CPU2
 *   sys_select               receive packet
 *   ...                      ...
 *   __add_wait_queue         update tp->rcv_nxt
 *   ...                      ...
 *   tp->rcv_nxt check        sock_def_readable
 *   ...                      {
 *   schedule                   rcu_read_lock();
 *                              wq = rcu_dereference(sk->sk_wq);
 *                              if (wq && waitqueue_active(&wq->wait))
 *                                      wake_up_interruptible(&wq->wait)
 *                              ...
 *                            }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
 * could then end up calling schedule and sleeping forever if there is no
 * more data on the socket.
 */
static inline bool skwq_has_sleeper(struct socket_wq *wq)
{
	return wq && wq_has_sleeper(&wq->wait);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:          file
 * @wait_address:  socket wait queue
 * @p:             poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
				  wait_queue_head_t *wait_address, poll_table *p)
{
	if (!poll_does_not_wait(p) && wait_address) {
		poll_wait(filp, wait_address, p);
		/* We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the wq_has_sleeper.
		 */
		smp_mb();
	}
}
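/* Example (illustrative sketch, not part of this header): the wake-up
 * side of the barrier pairing documented above.  sock_def_readable()
 * in net/core/sock.c follows essentially this pattern:
 *
 *	rcu_read_lock();
 *	wq = rcu_dereference(sk->sk_wq);
 *	if (skwq_has_sleeper(wq))
 *		wake_up_interruptible_sync_poll(&wq->wait,
 *						POLLIN | POLLRDNORM);
 *	rcu_read_unlock();
 */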
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;
		skb->hash = sk->sk_txhash;
	}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);

/*
 * Queue a received datagram if it will fit. Stream and sequenced
 * protocols can't normally use this as they need to fit buffers in
 * and play with them.
 *
 * Inlined as it's very short and called for pretty much every
 * packet ever received.
 */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);

/*
 * Recover an error report and clear it atomically
 */
static inline int sock_error(struct sock *sk)
{
	int err;

	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Note:
 * We use sk->sk_wq_raw from contexts where we know this
 * pointer is not NULL and cannot disappear/change.
 */
static inline void sk_set_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_clear_bit(int nr, struct sock *sk)
{
	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
	    !sock_flag(sk, SOCK_FASYNC))
		return;

	clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
 * need sizeof(sk_buff) + MTU + padding, unless the net driver performs
 * copybreak.
 * Note: for send buffers, TCP works better if we can build two skbs at
 * minimum.
 */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))

#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule);

/**
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
 * If the socket allocation mode allows the current thread to sleep, it is
 * safe to use the per-task page_frag instead of the per-socket one.
 */
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
	if (gfpflags_allow_blocking(sk->sk_allocation))
		return &current->task_frag;

	return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
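/* Example (illustrative sketch, not part of this header): the usual
 * pattern for filling a page fragment on the transmit path (the
 * wait_for_memory label is hypothetical):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *
 * Up to pfrag->size - pfrag->offset bytes may then be copied starting
 * at page_address(pfrag->page) + pfrag->offset, after which the caller
 * advances pfrag->offset by the amount actually copied.
 */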
/*
 * Default write policy as shown to user space via poll/select/SIGIO
 */
static inline bool sock_writeable(const struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

struct sock_skb_cb {
	u32 dropcount;
};

/* Store sock_skb_cb at the end of skb->cb[] so protocol families
 * using skb->cb[] would keep using it directly and utilize its
 * alignment guarantee.
 */
#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
			    sizeof(struct sock_skb_cb)))

#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
			    SOCK_SKB_CB_OFFSET))

#define sock_skb_cb_check_size(size) \
	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
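/* Example (illustrative sketch, not part of this header): a protocol
 * keeping private state in skb->cb[] can assert at build time that it
 * leaves room for the generic sock_skb_cb stored at the tail.  The
 * structure and init function below are hypothetical:
 *
 *	struct my_skb_cb {
 *		u32 seq;
 *		u32 flags;
 *	};
 *
 *	static int __init my_proto_init(void)
 *	{
 *		sock_skb_cb_check_size(sizeof(struct my_skb_cb));
 *		...
 *	}
 */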
static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
						atomic_read(&sk->sk_drops) : 0;
}

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb);

static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested
	 * - software time stamp available and wanted
	 * - hardware time stamps available and wanted
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
	    (hwtstamps->hwtstamp &&
	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;

	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
		__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb);

#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
			   (1UL << SOCK_RCVTSTAMP))
#define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE | \
			   SOF_TIMESTAMPING_RAW_HARDWARE)

	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
		sk->sk_stamp = skb->tstamp;
	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
		sk->sk_stamp = 0;
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tsflags:	timestamping flags to use
 * @tx_flags:	completed with instructions for time stamping
 *
 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
 */
static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
				     __u8 *tx_flags)
{
	if (unlikely(tsflags))
		__sock_tx_timestamp(tsflags, tx_flags);
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (skb->sk) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

/* This helper checks if a socket is a full socket,
 * i.e. _not_ a timewait or request socket.
 */
static inline bool sk_fullsock(const struct sock *sk)
{
	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV one.
 * SYNACK messages can be attached to either of them (depending on SYNCOOKIE).
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
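/* Example (illustrative sketch, not part of this header): request and
 * timewait minisockets only provide the sock_common prefix, so code
 * that may be handed any kind of socket must check sk_fullsock()
 * before touching fields of the full struct sock:
 *
 *	u32 mark = 0;
 *
 *	if (sk_fullsock(sk))
 *		mark = sk->sk_mark;	// full sockets only
 */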
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
		       int type);

bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_wmem ? */
	if (proto->sysctl_wmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

	return *proto->sysctl_wmem;
}

static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
	/* Does this proto have per netns sysctl_rmem ? */
	if (proto->sysctl_rmem_offset)
		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

	return *proto->sysctl_rmem;
}

/* Default TCP Small Queues budget is ~1 ms of data (1sec >> 10).
 * Some wifi drivers need to tweak it to get more chunks.
 * They can use this helper from their ndo_start_xmit().
 */
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
		return;
	sk->sk_pacing_shift = val;
}

#endif /* _SOCK_H */