xref: /openbmc/linux/include/net/sock.h (revision ed4543328f7108e1047b83b96ca7f7208747d930)
12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		Definitions for the AF_INET socket handler.
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Version:	@(#)sock.h	1.0.4	05/13/93
101da177e4SLinus Torvalds  *
1102c30a84SJesper Juhl  * Authors:	Ross Biro
121da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Fixes:
171da177e4SLinus Torvalds  *		Alan Cox	:	Volatiles in skbuff pointers. See
181da177e4SLinus Torvalds  *					skbuff comments. May be overdone,
191da177e4SLinus Torvalds  *					better to prove they can be removed
201da177e4SLinus Torvalds  *					than the reverse.
211da177e4SLinus Torvalds  *		Alan Cox	:	Added a zapped field for tcp to note
221da177e4SLinus Torvalds  *					a socket is reset and must stay shut up
231da177e4SLinus Torvalds  *		Alan Cox	:	New fields for options
241da177e4SLinus Torvalds  *	Pauline Middelink	:	identd support
251da177e4SLinus Torvalds  *		Alan Cox	:	Eliminate low level recv/recvfrom
261da177e4SLinus Torvalds  *		David S. Miller	:	New socket lookup architecture.
271da177e4SLinus Torvalds  *              Steve Whitehouse:       Default routines for sock_ops
281da177e4SLinus Torvalds  *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
291da177e4SLinus Torvalds  *              			protinfo be just a void pointer, as the
301da177e4SLinus Torvalds  *              			protocol specific parts were moved to
311da177e4SLinus Torvalds  *              			respective headers and ipv4/v6, etc now
321da177e4SLinus Torvalds  *              			use private slabcaches for its socks
331da177e4SLinus Torvalds  *              Pedro Hortas	:	New flags field for socket options
341da177e4SLinus Torvalds  */
351da177e4SLinus Torvalds #ifndef _SOCK_H
361da177e4SLinus Torvalds #define _SOCK_H
371da177e4SLinus Torvalds 
38a6b7a407SAlexey Dobriyan #include <linux/hardirq.h>
39172589ccSIlpo Järvinen #include <linux/kernel.h>
401da177e4SLinus Torvalds #include <linux/list.h>
4188ab1932SEric Dumazet #include <linux/list_nulls.h>
421da177e4SLinus Torvalds #include <linux/timer.h>
431da177e4SLinus Torvalds #include <linux/cache.h>
443f134619SGlauber Costa #include <linux/bitops.h>
45a5b5bb9aSIngo Molnar #include <linux/lockdep.h>
461da177e4SLinus Torvalds #include <linux/netdevice.h>
471da177e4SLinus Torvalds #include <linux/skbuff.h>	/* struct sk_buff */
48d7fe0f24SAl Viro #include <linux/mm.h>
491da177e4SLinus Torvalds #include <linux/security.h>
505a0e3ad6STejun Heo #include <linux/slab.h>
51c6e1a0d1STom Herbert #include <linux/uaccess.h>
523e32cb2eSJohannes Weiner #include <linux/page_counter.h>
53180d8cd9SGlauber Costa #include <linux/memcontrol.h>
54c5905afbSIngo Molnar #include <linux/static_key.h>
5540401530SAl Viro #include <linux/sched.h>
561ce0bf50SHerbert Xu #include <linux/wait.h>
572a56a1feSTejun Heo #include <linux/cgroup-defs.h>
5875c119afSEric Dumazet #include <linux/rbtree.h>
5988ab1932SEric Dumazet #include <linux/rculist_nulls.h>
60a57de0b4SJiri Olsa #include <linux/poll.h>
61c8c1bbb6SChristoph Hellwig #include <linux/sockptr.h>
621c5f2cedSEric Dumazet #include <linux/indirect_call_wrapper.h>
63c31504dcSEric Dumazet #include <linux/atomic.h>
6441c6d650SReshetova, Elena #include <linux/refcount.h>
65f35f8219SEric Dumazet #include <linux/llist.h>
661da177e4SLinus Torvalds #include <net/dst.h>
671da177e4SLinus Torvalds #include <net/checksum.h>
681d0ab253SEric Dumazet #include <net/tcp_states.h>
69b9f40e21SWillem de Bruijn #include <linux/net_tstamp.h>
7054dc3e33SDavid Ahern #include <net/l3mdev.h>
7104190bf8SPavel Tikhomirov #include <uapi/linux/socket.h>
721da177e4SLinus Torvalds 
731da177e4SLinus Torvalds /*
741da177e4SLinus Torvalds  * This structure really needs to be cleaned up.
751da177e4SLinus Torvalds  * Most of it is for TCP, and not used by any of
761da177e4SLinus Torvalds  * the other protocols.
771da177e4SLinus Torvalds  */
781da177e4SLinus Torvalds 
791da177e4SLinus Torvalds /* Define this to get the SOCK_DBG debugging facility. */
801da177e4SLinus Torvalds #define SOCK_DEBUGGING
811da177e4SLinus Torvalds #ifdef SOCK_DEBUGGING
821da177e4SLinus Torvalds #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
831da177e4SLinus Torvalds 					printk(KERN_DEBUG msg); } while (0)
841da177e4SLinus Torvalds #else
854cd9029dSStephen Hemminger /* Validate arguments and do nothing */
86b9075fa9SJoe Perches static inline __printf(2, 3)
SOCK_DEBUG(const struct sock * sk,const char * msg,...)87dc6b9b78SEric Dumazet void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
884cd9029dSStephen Hemminger {
894cd9029dSStephen Hemminger }
901da177e4SLinus Torvalds #endif
911da177e4SLinus Torvalds 
921da177e4SLinus Torvalds /* This is the per-socket lock.  The spinlock provides a synchronization
931da177e4SLinus Torvalds  * between user contexts and software interrupt processing, whereas the
941da177e4SLinus Torvalds  * mini-semaphore synchronizes multiple users amongst themselves.
951da177e4SLinus Torvalds  */
961da177e4SLinus Torvalds typedef struct {
971da177e4SLinus Torvalds 	spinlock_t		slock;
98d2e9117cSJohn Heffner 	int			owned;
991da177e4SLinus Torvalds 	wait_queue_head_t	wq;
100a5b5bb9aSIngo Molnar 	/*
101a5b5bb9aSIngo Molnar 	 * We express the mutex-alike socket_lock semantics
102a5b5bb9aSIngo Molnar 	 * to the lock validator by explicitly managing
103a5b5bb9aSIngo Molnar 	 * the slock as a lock variant (in addition to
104a5b5bb9aSIngo Molnar 	 * the slock itself):
105a5b5bb9aSIngo Molnar 	 */
106a5b5bb9aSIngo Molnar #ifdef CONFIG_DEBUG_LOCK_ALLOC
107a5b5bb9aSIngo Molnar 	struct lockdep_map dep_map;
108a5b5bb9aSIngo Molnar #endif
1091da177e4SLinus Torvalds } socket_lock_t;
1101da177e4SLinus Torvalds 
1111da177e4SLinus Torvalds struct sock;
1128feaf0c0SArnaldo Carvalho de Melo struct proto;
1130eeb8ffcSDenis V. Lunev struct net;
1141da177e4SLinus Torvalds 
115077b393dSEric Dumazet typedef __u32 __bitwise __portpair;
116077b393dSEric Dumazet typedef __u64 __bitwise __addrpair;
117077b393dSEric Dumazet 
1181da177e4SLinus Torvalds /**
1191da177e4SLinus Torvalds  *	struct sock_common - minimal network layer representation of sockets
12068835abaSEric Dumazet  *	@skc_daddr: Foreign IPv4 addr
12168835abaSEric Dumazet  *	@skc_rcv_saddr: Bound local IPv4 addr
12266256e0bSRandy Dunlap  *	@skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
1234dc6dc71SEric Dumazet  *	@skc_hash: hash value used with various protocol lookup tables
124d4cada4aSEric Dumazet  *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
125ce43b03eSEric Dumazet  *	@skc_dport: placeholder for inet_dport/tw_dport
126ce43b03eSEric Dumazet  *	@skc_num: placeholder for inet_num/tw_num
12766256e0bSRandy Dunlap  *	@skc_portpair: __u32 union of @skc_dport & @skc_num
1284dc3b16bSPavel Pisa  *	@skc_family: network address family
1294dc3b16bSPavel Pisa  *	@skc_state: Connection state
1304dc3b16bSPavel Pisa  *	@skc_reuse: %SO_REUSEADDR setting
131055dc21aSTom Herbert  *	@skc_reuseport: %SO_REUSEPORT setting
13266256e0bSRandy Dunlap  *	@skc_ipv6only: socket is IPV6 only
13366256e0bSRandy Dunlap  *	@skc_net_refcnt: socket is using net ref counting
1344dc3b16bSPavel Pisa  *	@skc_bound_dev_if: bound device index if != 0
1354dc3b16bSPavel Pisa  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
136512615b6SEric Dumazet  *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
1378feaf0c0SArnaldo Carvalho de Melo  *	@skc_prot: protocol handlers inside a network family
13807feaebfSEric W. Biederman  *	@skc_net: reference to the network namespace of this socket
13966256e0bSRandy Dunlap  *	@skc_v6_daddr: IPV6 destination address
14066256e0bSRandy Dunlap  *	@skc_v6_rcv_saddr: IPV6 source address
14166256e0bSRandy Dunlap  *	@skc_cookie: socket's cookie value
14268835abaSEric Dumazet  *	@skc_node: main hash linkage for various protocol lookup tables
14368835abaSEric Dumazet  *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
14468835abaSEric Dumazet  *	@skc_tx_queue_mapping: tx queue number for this connection
145c6345ce7SAmritha Nambiar  *	@skc_rx_queue_mapping: rx queue number for this connection
1468e5eb54dSEric Dumazet  *	@skc_flags: place holder for sk_flags
1478e5eb54dSEric Dumazet  *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
1488e5eb54dSEric Dumazet  *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
14966256e0bSRandy Dunlap  *	@skc_listener: connection request listener socket (aka rsk_listener)
15066256e0bSRandy Dunlap  *		[union with @skc_flags]
15166256e0bSRandy Dunlap  *	@skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
15266256e0bSRandy Dunlap  *		[union with @skc_flags]
15370da268bSEric Dumazet  *	@skc_incoming_cpu: record/match cpu processing incoming packets
15466256e0bSRandy Dunlap  *	@skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
15566256e0bSRandy Dunlap  *		[union with @skc_incoming_cpu]
15666256e0bSRandy Dunlap  *	@skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
15766256e0bSRandy Dunlap  *		[union with @skc_incoming_cpu]
15868835abaSEric Dumazet  *	@skc_refcnt: reference count
1591da177e4SLinus Torvalds  *
1601da177e4SLinus Torvalds  *	This is the minimal network layer representation of sockets, the header
1618feaf0c0SArnaldo Carvalho de Melo  *	for struct sock and struct inet_timewait_sock.
1621da177e4SLinus Torvalds  */
1631da177e4SLinus Torvalds struct sock_common {
164ce43b03eSEric Dumazet 	union {
165077b393dSEric Dumazet 		__addrpair	skc_addrpair;
166ce43b03eSEric Dumazet 		struct {
16768835abaSEric Dumazet 			__be32	skc_daddr;
16868835abaSEric Dumazet 			__be32	skc_rcv_saddr;
169ce43b03eSEric Dumazet 		};
170ce43b03eSEric Dumazet 	};
171d4cada4aSEric Dumazet 	union  {
17281c3d547SEric Dumazet 		unsigned int	skc_hash;
173d4cada4aSEric Dumazet 		__u16		skc_u16hashes[2];
174d4cada4aSEric Dumazet 	};
175ce43b03eSEric Dumazet 	/* skc_dport && skc_num must be grouped as well */
176ce43b03eSEric Dumazet 	union {
177077b393dSEric Dumazet 		__portpair	skc_portpair;
178ce43b03eSEric Dumazet 		struct {
179ce43b03eSEric Dumazet 			__be16	skc_dport;
180ce43b03eSEric Dumazet 			__u16	skc_num;
181ce43b03eSEric Dumazet 		};
182ce43b03eSEric Dumazet 	};
183ce43b03eSEric Dumazet 
1844dc6dc71SEric Dumazet 	unsigned short		skc_family;
1854dc6dc71SEric Dumazet 	volatile unsigned char	skc_state;
186055dc21aSTom Herbert 	unsigned char		skc_reuse:4;
1879fe516baSEric Dumazet 	unsigned char		skc_reuseport:1;
1889fe516baSEric Dumazet 	unsigned char		skc_ipv6only:1;
18926abe143SEric W. Biederman 	unsigned char		skc_net_refcnt:1;
1904dc6dc71SEric Dumazet 	int			skc_bound_dev_if;
191512615b6SEric Dumazet 	union {
1924dc6dc71SEric Dumazet 		struct hlist_node	skc_bind_node;
193ca065d0cSEric Dumazet 		struct hlist_node	skc_portaddr_node;
194512615b6SEric Dumazet 	};
1958feaf0c0SArnaldo Carvalho de Melo 	struct proto		*skc_prot;
1960c5c9fb5SEric W. Biederman 	possible_net_t		skc_net;
197efe4208fSEric Dumazet 
198efe4208fSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
199efe4208fSEric Dumazet 	struct in6_addr		skc_v6_daddr;
200efe4208fSEric Dumazet 	struct in6_addr		skc_v6_rcv_saddr;
201efe4208fSEric Dumazet #endif
202efe4208fSEric Dumazet 
20333cf7c90SEric Dumazet 	atomic64_t		skc_cookie;
20433cf7c90SEric Dumazet 
2058e5eb54dSEric Dumazet 	/* following fields are padding to force
2068e5eb54dSEric Dumazet 	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
2078e5eb54dSEric Dumazet 	 * assuming IPV6 is enabled. We use this padding differently
2088e5eb54dSEric Dumazet 	 * for different kind of 'sockets'
2098e5eb54dSEric Dumazet 	 */
2108e5eb54dSEric Dumazet 	union {
2118e5eb54dSEric Dumazet 		unsigned long	skc_flags;
2128e5eb54dSEric Dumazet 		struct sock	*skc_listener; /* request_sock */
2138e5eb54dSEric Dumazet 		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
2148e5eb54dSEric Dumazet 	};
21568835abaSEric Dumazet 	/*
21668835abaSEric Dumazet 	 * fields between dontcopy_begin/dontcopy_end
21768835abaSEric Dumazet 	 * are not copied in sock_copy()
21868835abaSEric Dumazet 	 */
219928c41e7SRandy Dunlap 	/* private: */
22068835abaSEric Dumazet 	int			skc_dontcopy_begin[0];
221928c41e7SRandy Dunlap 	/* public: */
22268835abaSEric Dumazet 	union {
22368835abaSEric Dumazet 		struct hlist_node	skc_node;
22468835abaSEric Dumazet 		struct hlist_nulls_node skc_nulls_node;
22568835abaSEric Dumazet 	};
226755c31cdSAmritha Nambiar 	unsigned short		skc_tx_queue_mapping;
2274e1beeccSTariq Toukan #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
228c6345ce7SAmritha Nambiar 	unsigned short		skc_rx_queue_mapping;
229c6345ce7SAmritha Nambiar #endif
230ed53d0abSEric Dumazet 	union {
23170da268bSEric Dumazet 		int		skc_incoming_cpu;
232ed53d0abSEric Dumazet 		u32		skc_rcv_wnd;
233d475f090SEric Dumazet 		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
234ed53d0abSEric Dumazet 	};
23570da268bSEric Dumazet 
23641c6d650SReshetova, Elena 	refcount_t		skc_refcnt;
237928c41e7SRandy Dunlap 	/* private: */
23868835abaSEric Dumazet 	int                     skc_dontcopy_end[0];
239ed53d0abSEric Dumazet 	union {
240ed53d0abSEric Dumazet 		u32		skc_rxhash;
241ed53d0abSEric Dumazet 		u32		skc_window_clamp;
242d475f090SEric Dumazet 		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
243ed53d0abSEric Dumazet 	};
244928c41e7SRandy Dunlap 	/* public: */
2451da177e4SLinus Torvalds };
2461da177e4SLinus Torvalds 
2471f00d375SKP Singh struct bpf_local_storage;
248b6459415SJakub Kicinski struct sk_filter;
2496ac99e8fSMartin KaFai Lau 
2501da177e4SLinus Torvalds /**
2511da177e4SLinus Torvalds   *	struct sock - network layer representation of sockets
2528feaf0c0SArnaldo Carvalho de Melo   *	@__sk_common: shared layout with inet_timewait_sock
2534dc3b16bSPavel Pisa   *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
2544dc3b16bSPavel Pisa   *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
2554dc3b16bSPavel Pisa   *	@sk_lock:	synchronizer
256cdfbabfbSDavid Howells   *	@sk_kern_sock: True if sock is using kernel lock classes
2574dc3b16bSPavel Pisa   *	@sk_rcvbuf: size of receive buffer in bytes
25843815482SEric Dumazet   *	@sk_wq: sock wait queue and async head
259421b3885SShawn Bohrer   *	@sk_rx_dst: receive input route used by early demux
2600c0a5ef8SEric Dumazet   *	@sk_rx_dst_ifindex: ifindex for @sk_rx_dst
261ef57c161SEric Dumazet   *	@sk_rx_dst_cookie: cookie for @sk_rx_dst
2624dc3b16bSPavel Pisa   *	@sk_dst_cache: destination cache
2639b8805a3SJulian Anastasov   *	@sk_dst_pending_confirm: need to confirm neighbour
2644dc3b16bSPavel Pisa   *	@sk_policy: flow policy
2654dc3b16bSPavel Pisa   *	@sk_receive_queue: incoming packets
2664dc3b16bSPavel Pisa   *	@sk_wmem_alloc: transmit queue bytes committed
267771edcafSstephen hemminger   *	@sk_tsq_flags: TCP Small Queues flags
2684dc3b16bSPavel Pisa   *	@sk_write_queue: Packet sending queue
2694dc3b16bSPavel Pisa   *	@sk_omem_alloc: "o" is "option" or "other"
2704dc3b16bSPavel Pisa   *	@sk_wmem_queued: persistent queue size
2714dc3b16bSPavel Pisa   *	@sk_forward_alloc: space allocated forward
2722bb2f5fbSWei Wang   *	@sk_reserved_mem: space reserved and non-reclaimable for the socket
27306021292SEliezer Tamir   *	@sk_napi_id: id of the last napi context to receive data for sk
274dafcc438SEliezer Tamir   *	@sk_ll_usec: usecs to busypoll when there is no data
2754dc3b16bSPavel Pisa   *	@sk_allocation: allocation mode
27695bd09ebSEric Dumazet   *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
277218af599SEric Dumazet   *	@sk_pacing_status: Pacing status (requested, handled by sch_fq)
278c3f40d7cSEric Dumazet   *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
2794dc3b16bSPavel Pisa   *	@sk_sndbuf: size of send buffer in bytes
280771edcafSstephen hemminger   *	@__sk_flags_offset: empty field used to determine location of bitfield
281293de7deSStephen Hemminger   *	@sk_padding: unused element for alignment
28228448b80STom Herbert   *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
28328448b80STom Herbert   *	@sk_no_check_rx: allow zero checksum in RX packets
2844dc3b16bSPavel Pisa   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
285aba54656SEric Dumazet   *	@sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
286bcd76111SHerbert Xu   *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
28782cc1a7aSPeter P Waskiewicz Jr   *	@sk_gso_max_size: Maximum GSO segment size to build
2881485348dSBen Hutchings   *	@sk_gso_max_segs: Maximum number of GSO segments
2893a9b76fdSEric Dumazet   *	@sk_pacing_shift: scaling factor for TCP Small Queues
2904dc3b16bSPavel Pisa   *	@sk_lingertime: %SO_LINGER l_linger setting
2914dc3b16bSPavel Pisa   *	@sk_backlog: always used with the per-socket spinlock held
2924dc3b16bSPavel Pisa   *	@sk_callback_lock: used with the callbacks in the end of this struct
2934dc3b16bSPavel Pisa   *	@sk_error_queue: rarely used
29433c732c3SWang Chen   *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
29533c732c3SWang Chen   *			  IPV6_ADDRFORM for instance)
2964dc3b16bSPavel Pisa   *	@sk_err: last error
29733c732c3SWang Chen   *	@sk_err_soft: errors that don't cause failure but are the cause of a
29833c732c3SWang Chen   *		      persistent failure not just 'timed out'
299cb61cb9bSEric Dumazet   *	@sk_drops: raw/udp drops counter
3004dc3b16bSPavel Pisa   *	@sk_ack_backlog: current listen backlog
3014dc3b16bSPavel Pisa   *	@sk_max_ack_backlog: listen backlog set in listen()
302771edcafSstephen hemminger   *	@sk_uid: user id of owner
3037fd3253aSBjörn Töpel   *	@sk_prefer_busy_poll: prefer busypolling over softirq processing
3047c951cafSBjörn Töpel   *	@sk_busy_poll_budget: napi processing budget when busypolling
3054dc3b16bSPavel Pisa   *	@sk_priority: %SO_PRIORITY setting
3064dc3b16bSPavel Pisa   *	@sk_type: socket type (%SOCK_STREAM, etc)
3074dc3b16bSPavel Pisa   *	@sk_protocol: which protocol this socket belongs in this network family
3085fb14d20SEric Dumazet   *	@sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
30953c3fa20SRandy Dunlap   *	@sk_peer_pid: &struct pid for this socket's peer
31053c3fa20SRandy Dunlap   *	@sk_peer_cred: %SO_PEERCRED setting
3114dc3b16bSPavel Pisa   *	@sk_rcvlowat: %SO_RCVLOWAT setting
3124dc3b16bSPavel Pisa   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
3134dc3b16bSPavel Pisa   *	@sk_sndtimeo: %SO_SNDTIMEO setting
314b73c3d0eSTom Herbert   *	@sk_txhash: computed flow hash for use on transmit
31526859240SAkhmat Karakotov   *	@sk_txrehash: enable TX hash rethink
3164dc3b16bSPavel Pisa   *	@sk_filter: socket filtering instructions
3174dc3b16bSPavel Pisa   *	@sk_timer: sock cleanup timer
3184dc3b16bSPavel Pisa   *	@sk_stamp: time stamp of last packet received
3193a0ed3e9SDeepa Dinamani   *	@sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
320d463126eSYangbo Lu   *	@sk_tsflags: SO_TIMESTAMPING flags
321fb87bd47SGuillaume Nault   *	@sk_use_task_frag: allow sk_page_frag() to use current->task_frag.
322fb87bd47SGuillaume Nault   *			   Sockets that can be used under memory reclaim should
323fb87bd47SGuillaume Nault   *			   set this to false.
324d463126eSYangbo Lu   *	@sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
325d463126eSYangbo Lu   *	              for timestamping
32609c2d251SWillem de Bruijn   *	@sk_tskey: counter to disambiguate concurrent tstamp requests
32752267790SWillem de Bruijn   *	@sk_zckey: counter to order MSG_ZEROCOPY notifications
3284dc3b16bSPavel Pisa   *	@sk_socket: Identd and reporting IO signals
329b68777d5SJakub Sitnicki   *	@sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
3305640f768SEric Dumazet   *	@sk_frag: cached page frag
331d3d4f0a0SRandy Dunlap   *	@sk_peek_off: current peek_offset value
3324dc3b16bSPavel Pisa   *	@sk_send_head: front of stuff to transmit
33366256e0bSRandy Dunlap   *	@tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
33467be2dd1SMartin Waitz   *	@sk_security: used by security modules
33531729363SRandy Dunlap   *	@sk_mark: generic packet mark
3362a56a1feSTejun Heo   *	@sk_cgrp_data: cgroup data for this cgroup
337baac50bbSJohannes Weiner   *	@sk_memcg: this socket's memory cgroup association
3384dc3b16bSPavel Pisa   *	@sk_write_pending: a write to stream socket waits to start
339419ce133SPaolo Abeni   *	@sk_disconnects: number of disconnect operations performed on this sock
3404dc3b16bSPavel Pisa   *	@sk_state_change: callback to indicate change in the state of the sock
3414dc3b16bSPavel Pisa   *	@sk_data_ready: callback to indicate there is data to be processed
3424dc3b16bSPavel Pisa   *	@sk_write_space: callback to indicate there is bf sending space available
3434dc3b16bSPavel Pisa   *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
3444dc3b16bSPavel Pisa   *	@sk_backlog_rcv: callback to process the backlog
34566256e0bSRandy Dunlap   *	@sk_validate_xmit_skb: ptr to an optional validate function
3464dc3b16bSPavel Pisa   *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
347ef456144SCraig Gallek   *	@sk_reuseport_cb: reuseport group container
34866256e0bSRandy Dunlap   *	@sk_bpf_storage: ptr to cache and control for bpf_sk_storage
349293de7deSStephen Hemminger   *	@sk_rcu: used during RCU grace period
35080b14deeSRichard Cochran   *	@sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
35180b14deeSRichard Cochran   *	@sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
35266256e0bSRandy Dunlap   *	@sk_txtime_report_errors: set report errors mode for SO_TXTIME
35380b14deeSRichard Cochran   *	@sk_txtime_unused: unused txtime flags
354ffa84b5fSEric Dumazet   *	@ns_tracker: tracker for netns reference
35528044fc1SJoanne Koong   *	@sk_bind2_node: bind node in the bhash2 table
3561da177e4SLinus Torvalds   */
3571da177e4SLinus Torvalds struct sock {
3581da177e4SLinus Torvalds 	/*
3598feaf0c0SArnaldo Carvalho de Melo 	 * Now struct inet_timewait_sock also uses sock_common, so please just
3601da177e4SLinus Torvalds 	 * don't add nothing before this first member (__sk_common) --acme
3611da177e4SLinus Torvalds 	 */
3621da177e4SLinus Torvalds 	struct sock_common	__sk_common;
3634dc6dc71SEric Dumazet #define sk_node			__sk_common.skc_node
3644dc6dc71SEric Dumazet #define sk_nulls_node		__sk_common.skc_nulls_node
3654dc6dc71SEric Dumazet #define sk_refcnt		__sk_common.skc_refcnt
366e022f0b4SKrishna Kumar #define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
3674e1beeccSTariq Toukan #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
368c6345ce7SAmritha Nambiar #define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
369c6345ce7SAmritha Nambiar #endif
3704dc6dc71SEric Dumazet 
37168835abaSEric Dumazet #define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
37268835abaSEric Dumazet #define sk_dontcopy_end		__sk_common.skc_dontcopy_end
3734dc6dc71SEric Dumazet #define sk_hash			__sk_common.skc_hash
37450805466SEric Dumazet #define sk_portpair		__sk_common.skc_portpair
37505dbc7b5SEric Dumazet #define sk_num			__sk_common.skc_num
37605dbc7b5SEric Dumazet #define sk_dport		__sk_common.skc_dport
37750805466SEric Dumazet #define sk_addrpair		__sk_common.skc_addrpair
37850805466SEric Dumazet #define sk_daddr		__sk_common.skc_daddr
37950805466SEric Dumazet #define sk_rcv_saddr		__sk_common.skc_rcv_saddr
3801da177e4SLinus Torvalds #define sk_family		__sk_common.skc_family
3811da177e4SLinus Torvalds #define sk_state		__sk_common.skc_state
3821da177e4SLinus Torvalds #define sk_reuse		__sk_common.skc_reuse
383055dc21aSTom Herbert #define sk_reuseport		__sk_common.skc_reuseport
3849fe516baSEric Dumazet #define sk_ipv6only		__sk_common.skc_ipv6only
38526abe143SEric W. Biederman #define sk_net_refcnt		__sk_common.skc_net_refcnt
3861da177e4SLinus Torvalds #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
3871da177e4SLinus Torvalds #define sk_bind_node		__sk_common.skc_bind_node
3888feaf0c0SArnaldo Carvalho de Melo #define sk_prot			__sk_common.skc_prot
38907feaebfSEric W. Biederman #define sk_net			__sk_common.skc_net
390efe4208fSEric Dumazet #define sk_v6_daddr		__sk_common.skc_v6_daddr
391efe4208fSEric Dumazet #define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
39233cf7c90SEric Dumazet #define sk_cookie		__sk_common.skc_cookie
39370da268bSEric Dumazet #define sk_incoming_cpu		__sk_common.skc_incoming_cpu
3948e5eb54dSEric Dumazet #define sk_flags		__sk_common.skc_flags
395ed53d0abSEric Dumazet #define sk_rxhash		__sk_common.skc_rxhash
396efe4208fSEric Dumazet 
39743f51df4SEric Dumazet 	/* early demux fields */
3988b3f9133SJakub Kicinski 	struct dst_entry __rcu	*sk_rx_dst;
39943f51df4SEric Dumazet 	int			sk_rx_dst_ifindex;
40043f51df4SEric Dumazet 	u32			sk_rx_dst_cookie;
40143f51df4SEric Dumazet 
402b178bb3dSEric Dumazet 	socket_lock_t		sk_lock;
4039115e8cdSEric Dumazet 	atomic_t		sk_drops;
4049115e8cdSEric Dumazet 	int			sk_rcvlowat;
4059115e8cdSEric Dumazet 	struct sk_buff_head	sk_error_queue;
406b178bb3dSEric Dumazet 	struct sk_buff_head	sk_receive_queue;
407b178bb3dSEric Dumazet 	/*
408b178bb3dSEric Dumazet 	 * The backlog queue is special, it is always used with
409b178bb3dSEric Dumazet 	 * the per-socket spinlock held and requires low latency
410b178bb3dSEric Dumazet 	 * access. Therefore we special case it's implementation.
411b178bb3dSEric Dumazet 	 * Note : rmem_alloc is in this structure to fill a hole
412b178bb3dSEric Dumazet 	 * on 64bit arches, not because its logically part of
413b178bb3dSEric Dumazet 	 * backlog.
414b178bb3dSEric Dumazet 	 */
415b178bb3dSEric Dumazet 	struct {
416b178bb3dSEric Dumazet 		atomic_t	rmem_alloc;
417b178bb3dSEric Dumazet 		int		len;
418b178bb3dSEric Dumazet 		struct sk_buff	*head;
419b178bb3dSEric Dumazet 		struct sk_buff	*tail;
420b178bb3dSEric Dumazet 	} sk_backlog;
421f35f8219SEric Dumazet 
422b178bb3dSEric Dumazet #define sk_rmem_alloc sk_backlog.rmem_alloc
4232c8c56e1SEric Dumazet 
4249115e8cdSEric Dumazet 	int			sk_forward_alloc;
4252bb2f5fbSWei Wang 	u32			sk_reserved_mem;
426e0d1095aSCong Wang #ifdef CONFIG_NET_RX_BUSY_POLL
427dafcc438SEliezer Tamir 	unsigned int		sk_ll_usec;
4289115e8cdSEric Dumazet 	/* ===== mostly read cache line ===== */
4299115e8cdSEric Dumazet 	unsigned int		sk_napi_id;
43006021292SEliezer Tamir #endif
431b178bb3dSEric Dumazet 	int			sk_rcvbuf;
432419ce133SPaolo Abeni 	int			sk_disconnects;
433b178bb3dSEric Dumazet 
434b178bb3dSEric Dumazet 	struct sk_filter __rcu	*sk_filter;
435ceb5d58bSEric Dumazet 	union {
436eaefd110SEric Dumazet 		struct socket_wq __rcu	*sk_wq;
43766256e0bSRandy Dunlap 		/* private: */
438ceb5d58bSEric Dumazet 		struct socket_wq	*sk_wq_raw;
43966256e0bSRandy Dunlap 		/* public: */
440ceb5d58bSEric Dumazet 	};
441b178bb3dSEric Dumazet #ifdef CONFIG_XFRM
442d188ba86SEric Dumazet 	struct xfrm_policy __rcu *sk_policy[2];
443b178bb3dSEric Dumazet #endif
4440c0a5ef8SEric Dumazet 
4450e36cbb3SCong Wang 	struct dst_entry __rcu	*sk_dst_cache;
446b178bb3dSEric Dumazet 	atomic_t		sk_omem_alloc;
447b178bb3dSEric Dumazet 	int			sk_sndbuf;
4489115e8cdSEric Dumazet 
4499115e8cdSEric Dumazet 	/* ===== cache line for TX ===== */
4509115e8cdSEric Dumazet 	int			sk_wmem_queued;
45114afee4bSReshetova, Elena 	refcount_t		sk_wmem_alloc;
4529115e8cdSEric Dumazet 	unsigned long		sk_tsq_flags;
45375c119afSEric Dumazet 	union {
4549115e8cdSEric Dumazet 		struct sk_buff	*sk_send_head;
45575c119afSEric Dumazet 		struct rb_root	tcp_rtx_queue;
45675c119afSEric Dumazet 	};
457b178bb3dSEric Dumazet 	struct sk_buff_head	sk_write_queue;
4589115e8cdSEric Dumazet 	__s32			sk_peek_off;
4599115e8cdSEric Dumazet 	int			sk_write_pending;
4609b8805a3SJulian Anastasov 	__u32			sk_dst_pending_confirm;
461218af599SEric Dumazet 	u32			sk_pacing_status; /* see enum sk_pacing */
4629115e8cdSEric Dumazet 	long			sk_sndtimeo;
4639115e8cdSEric Dumazet 	struct timer_list	sk_timer;
4649115e8cdSEric Dumazet 	__u32			sk_priority;
4659115e8cdSEric Dumazet 	__u32			sk_mark;
46676a9ebe8SEric Dumazet 	unsigned long		sk_pacing_rate; /* bytes per second */
46776a9ebe8SEric Dumazet 	unsigned long		sk_max_pacing_rate;
4689115e8cdSEric Dumazet 	struct page_frag	sk_frag;
4699115e8cdSEric Dumazet 	netdev_features_t	sk_route_caps;
4709115e8cdSEric Dumazet 	int			sk_gso_type;
4719115e8cdSEric Dumazet 	unsigned int		sk_gso_max_size;
4729115e8cdSEric Dumazet 	gfp_t			sk_allocation;
4739115e8cdSEric Dumazet 	__u32			sk_txhash;
474fc64869cSAndrey Ryabinin 
475fc64869cSAndrey Ryabinin 	/*
476fc64869cSAndrey Ryabinin 	 * Because of non atomicity rules, all
477fc64869cSAndrey Ryabinin 	 * changes are protected by socket lock.
478fc64869cSAndrey Ryabinin 	 */
479aba54656SEric Dumazet 	u8			sk_gso_disabled : 1,
480cdfbabfbSDavid Howells 				sk_kern_sock : 1,
48128448b80STom Herbert 				sk_no_check_tx : 1,
48228448b80STom Herbert 				sk_no_check_rx : 1,
483bf976514SMat Martineau 				sk_userlocks : 4;
4843a9b76fdSEric Dumazet 	u8			sk_pacing_shift;
485bf976514SMat Martineau 	u16			sk_type;
486bf976514SMat Martineau 	u16			sk_protocol;
487bf976514SMat Martineau 	u16			sk_gso_max_segs;
4881da177e4SLinus Torvalds 	unsigned long	        sk_lingertime;
489476e19cfSArnaldo Carvalho de Melo 	struct proto		*sk_prot_creator;
4901da177e4SLinus Torvalds 	rwlock_t		sk_callback_lock;
4911da177e4SLinus Torvalds 	int			sk_err,
4921da177e4SLinus Torvalds 				sk_err_soft;
493becb74f0SEric Dumazet 	u32			sk_ack_backlog;
494becb74f0SEric Dumazet 	u32			sk_max_ack_backlog;
49586741ec2SLorenzo Colitti 	kuid_t			sk_uid;
49626859240SAkhmat Karakotov 	u8			sk_txrehash;
4977fd3253aSBjörn Töpel #ifdef CONFIG_NET_RX_BUSY_POLL
4987fd3253aSBjörn Töpel 	u8			sk_prefer_busy_poll;
4997c951cafSBjörn Töpel 	u16			sk_busy_poll_budget;
5007fd3253aSBjörn Töpel #endif
50135306eb2SEric Dumazet 	spinlock_t		sk_peer_lock;
5021ace2b4dSEric Dumazet 	int			sk_bind_phc;
503109f6e39SEric W. Biederman 	struct pid		*sk_peer_pid;
504109f6e39SEric W. Biederman 	const struct cred	*sk_peer_cred;
50535306eb2SEric Dumazet 
5061da177e4SLinus Torvalds 	long			sk_rcvtimeo;
507b7aa0bf7SEric Dumazet 	ktime_t			sk_stamp;
5083a0ed3e9SDeepa Dinamani #if BITS_PER_LONG==32
5093a0ed3e9SDeepa Dinamani 	seqlock_t		sk_stamp_seq;
5103a0ed3e9SDeepa Dinamani #endif
511a1cdec57SEric Dumazet 	atomic_t		sk_tskey;
51252267790SWillem de Bruijn 	atomic_t		sk_zckey;
513b534dc46SWillem de Bruijn 	u32			sk_tsflags;
514b534dc46SWillem de Bruijn 	u8			sk_shutdown;
51580b14deeSRichard Cochran 
51680b14deeSRichard Cochran 	u8			sk_clockid;
51780b14deeSRichard Cochran 	u8			sk_txtime_deadline_mode : 1,
5184b15c707SJesus Sanchez-Palencia 				sk_txtime_report_errors : 1,
5194b15c707SJesus Sanchez-Palencia 				sk_txtime_unused : 6;
520fb87bd47SGuillaume Nault 	bool			sk_use_task_frag;
52180b14deeSRichard Cochran 
5221da177e4SLinus Torvalds 	struct socket		*sk_socket;
5231da177e4SLinus Torvalds 	void			*sk_user_data;
524d5f64238SAlexey Dobriyan #ifdef CONFIG_SECURITY
5251da177e4SLinus Torvalds 	void			*sk_security;
526d5f64238SAlexey Dobriyan #endif
5272a56a1feSTejun Heo 	struct sock_cgroup_data	sk_cgrp_data;
528baac50bbSJohannes Weiner 	struct mem_cgroup	*sk_memcg;
5291da177e4SLinus Torvalds 	void			(*sk_state_change)(struct sock *sk);
530676d2369SDavid S. Miller 	void			(*sk_data_ready)(struct sock *sk);
5311da177e4SLinus Torvalds 	void			(*sk_write_space)(struct sock *sk);
5321da177e4SLinus Torvalds 	void			(*sk_error_report)(struct sock *sk);
5331da177e4SLinus Torvalds 	int			(*sk_backlog_rcv)(struct sock *sk,
5341da177e4SLinus Torvalds 						  struct sk_buff *skb);
535ebf4e808SIlya Lesokhin #ifdef CONFIG_SOCK_VALIDATE_XMIT
536ebf4e808SIlya Lesokhin 	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
537ebf4e808SIlya Lesokhin 							struct net_device *dev,
538ebf4e808SIlya Lesokhin 							struct sk_buff *skb);
539ebf4e808SIlya Lesokhin #endif
5401da177e4SLinus Torvalds 	void                    (*sk_destruct)(struct sock *sk);
541ef456144SCraig Gallek 	struct sock_reuseport __rcu	*sk_reuseport_cb;
5426ac99e8fSMartin KaFai Lau #ifdef CONFIG_BPF_SYSCALL
5431f00d375SKP Singh 	struct bpf_local_storage __rcu	*sk_bpf_storage;
5446ac99e8fSMartin KaFai Lau #endif
545a4298e45SEric Dumazet 	struct rcu_head		sk_rcu;
546ffa84b5fSEric Dumazet 	netns_tracker		ns_tracker;
54728044fc1SJoanne Koong 	struct hlist_node	sk_bind2_node;
5481da177e4SLinus Torvalds };
5491da177e4SLinus Torvalds 
550218af599SEric Dumazet enum sk_pacing {
551218af599SEric Dumazet 	SK_PACING_NONE		= 0,
552218af599SEric Dumazet 	SK_PACING_NEEDED	= 1,
553218af599SEric Dumazet 	SK_PACING_FQ		= 2,
554218af599SEric Dumazet };
555218af599SEric Dumazet 
5562a013372SHawkins Jiawei /* flag bits in sk_user_data
5572a013372SHawkins Jiawei  *
5582a013372SHawkins Jiawei  * - SK_USER_DATA_NOCOPY:      Pointer stored in sk_user_data might
5592a013372SHawkins Jiawei  *   not be suitable for copying when cloning the socket. For instance,
5602a013372SHawkins Jiawei  *   it can point to a reference counted object. sk_user_data bottom
5612a013372SHawkins Jiawei  *   bit is set if pointer must not be copied.
5622a013372SHawkins Jiawei  *
5632a013372SHawkins Jiawei  * - SK_USER_DATA_BPF:         Mark whether sk_user_data field is
5642a013372SHawkins Jiawei  *   managed/owned by a BPF reuseport array. This bit should be set
5652a013372SHawkins Jiawei  *   when sk_user_data's sk is added to the bpf's reuseport_array.
5662a013372SHawkins Jiawei  *
5672a013372SHawkins Jiawei  * - SK_USER_DATA_PSOCK:       Mark whether pointer stored in
5682a013372SHawkins Jiawei  *   sk_user_data points to psock type. This bit should be set
5692a013372SHawkins Jiawei  *   when sk_user_data is assigned to a psock object.
570f1ff5ce2SJakub Sitnicki  */
571f1ff5ce2SJakub Sitnicki #define SK_USER_DATA_NOCOPY	1UL
5722a013372SHawkins Jiawei #define SK_USER_DATA_BPF	2UL
5732a013372SHawkins Jiawei #define SK_USER_DATA_PSOCK	4UL
5742a013372SHawkins Jiawei #define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
5752a013372SHawkins Jiawei 				  SK_USER_DATA_PSOCK)
576f1ff5ce2SJakub Sitnicki 
577f1ff5ce2SJakub Sitnicki /**
578f1ff5ce2SJakub Sitnicki  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
579f1ff5ce2SJakub Sitnicki  * @sk: socket
580f1ff5ce2SJakub Sitnicki  */
sk_user_data_is_nocopy(const struct sock * sk)581f1ff5ce2SJakub Sitnicki static inline bool sk_user_data_is_nocopy(const struct sock *sk)
582f1ff5ce2SJakub Sitnicki {
583f1ff5ce2SJakub Sitnicki 	return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
584f1ff5ce2SJakub Sitnicki }
585f1ff5ce2SJakub Sitnicki 
586559835eaSPravin B Shelar #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
587559835eaSPravin B Shelar 
5882a013372SHawkins Jiawei /**
589fc4aaf9fSDavid Howells  * __locked_read_sk_user_data_with_flags - return the pointer
590fc4aaf9fSDavid Howells  * only if argument flags all has been set in sk_user_data. Otherwise
591fc4aaf9fSDavid Howells  * return NULL
592fc4aaf9fSDavid Howells  *
593fc4aaf9fSDavid Howells  * @sk: socket
594fc4aaf9fSDavid Howells  * @flags: flag bits
595fc4aaf9fSDavid Howells  *
596fc4aaf9fSDavid Howells  * The caller must be holding sk->sk_callback_lock.
597fc4aaf9fSDavid Howells  */
598fc4aaf9fSDavid Howells static inline void *
__locked_read_sk_user_data_with_flags(const struct sock * sk,uintptr_t flags)599fc4aaf9fSDavid Howells __locked_read_sk_user_data_with_flags(const struct sock *sk,
600fc4aaf9fSDavid Howells 				      uintptr_t flags)
601fc4aaf9fSDavid Howells {
602fc4aaf9fSDavid Howells 	uintptr_t sk_user_data =
603fc4aaf9fSDavid Howells 		(uintptr_t)rcu_dereference_check(__sk_user_data(sk),
604fc4aaf9fSDavid Howells 						 lockdep_is_held(&sk->sk_callback_lock));
605fc4aaf9fSDavid Howells 
606fc4aaf9fSDavid Howells 	WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
607fc4aaf9fSDavid Howells 
608fc4aaf9fSDavid Howells 	if ((sk_user_data & flags) == flags)
609fc4aaf9fSDavid Howells 		return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
610fc4aaf9fSDavid Howells 	return NULL;
611fc4aaf9fSDavid Howells }
612fc4aaf9fSDavid Howells 
613fc4aaf9fSDavid Howells /**
6142a013372SHawkins Jiawei  * __rcu_dereference_sk_user_data_with_flags - return the pointer
6152a013372SHawkins Jiawei  * only if argument flags all has been set in sk_user_data. Otherwise
6162a013372SHawkins Jiawei  * return NULL
6172a013372SHawkins Jiawei  *
6182a013372SHawkins Jiawei  * @sk: socket
6192a013372SHawkins Jiawei  * @flags: flag bits
6202a013372SHawkins Jiawei  */
6212a013372SHawkins Jiawei static inline void *
__rcu_dereference_sk_user_data_with_flags(const struct sock * sk,uintptr_t flags)6222a013372SHawkins Jiawei __rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
6232a013372SHawkins Jiawei 					  uintptr_t flags)
6242a013372SHawkins Jiawei {
6252a013372SHawkins Jiawei 	uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
6262a013372SHawkins Jiawei 
6272a013372SHawkins Jiawei 	WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
6282a013372SHawkins Jiawei 
6292a013372SHawkins Jiawei 	if ((sk_user_data & flags) == flags)
6302a013372SHawkins Jiawei 		return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
6312a013372SHawkins Jiawei 	return NULL;
6322a013372SHawkins Jiawei }
6332a013372SHawkins Jiawei 
634f1ff5ce2SJakub Sitnicki #define rcu_dereference_sk_user_data(sk)				\
6352a013372SHawkins Jiawei 	__rcu_dereference_sk_user_data_with_flags(sk, 0)
6362a013372SHawkins Jiawei #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags)		\
637f1ff5ce2SJakub Sitnicki ({									\
6382a013372SHawkins Jiawei 	uintptr_t __tmp1 = (uintptr_t)(ptr),				\
6392a013372SHawkins Jiawei 		  __tmp2 = (uintptr_t)(flags);				\
6402a013372SHawkins Jiawei 	WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK);			\
6412a013372SHawkins Jiawei 	WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK);			\
6422a013372SHawkins Jiawei 	rcu_assign_pointer(__sk_user_data((sk)),			\
6432a013372SHawkins Jiawei 			   __tmp1 | __tmp2);				\
644f1ff5ce2SJakub Sitnicki })
645f1ff5ce2SJakub Sitnicki #define rcu_assign_sk_user_data(sk, ptr)				\
6462a013372SHawkins Jiawei 	__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
647559835eaSPravin B Shelar 
648e187013aSAkhmat Karakotov static inline
sock_net(const struct sock * sk)649e187013aSAkhmat Karakotov struct net *sock_net(const struct sock *sk)
650e187013aSAkhmat Karakotov {
651e187013aSAkhmat Karakotov 	return read_pnet(&sk->sk_net);
652e187013aSAkhmat Karakotov }
653e187013aSAkhmat Karakotov 
654e187013aSAkhmat Karakotov static inline
sock_net_set(struct sock * sk,struct net * net)655e187013aSAkhmat Karakotov void sock_net_set(struct sock *sk, struct net *net)
656e187013aSAkhmat Karakotov {
657e187013aSAkhmat Karakotov 	write_pnet(&sk->sk_net, net);
658e187013aSAkhmat Karakotov }
659e187013aSAkhmat Karakotov 
6604a17fd52SPavel Emelyanov /*
6614a17fd52SPavel Emelyanov  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
6624a17fd52SPavel Emelyanov  * or not whether his port will be reused by someone else. SK_FORCE_REUSE
6634a17fd52SPavel Emelyanov  * on a socket means that the socket will reuse everybody else's port
6644a17fd52SPavel Emelyanov  * without looking at the other's sk_reuse value.
6654a17fd52SPavel Emelyanov  */
6664a17fd52SPavel Emelyanov 
6674a17fd52SPavel Emelyanov #define SK_NO_REUSE	0
6684a17fd52SPavel Emelyanov #define SK_CAN_REUSE	1
6694a17fd52SPavel Emelyanov #define SK_FORCE_REUSE	2
6704a17fd52SPavel Emelyanov 
671627d2d6bSsamanthakumar int sk_set_peek_off(struct sock *sk, int val);
672627d2d6bSsamanthakumar 
sk_peek_offset(const struct sock * sk,int flags)673a84a434bSPeter Lafreniere static inline int sk_peek_offset(const struct sock *sk, int flags)
674ef64a54fSPavel Emelyanov {
675b9bb53f3SWillem de Bruijn 	if (unlikely(flags & MSG_PEEK)) {
676a0917e0bSMatthew Dawson 		return READ_ONCE(sk->sk_peek_off);
677b9bb53f3SWillem de Bruijn 	}
678b9bb53f3SWillem de Bruijn 
679ef64a54fSPavel Emelyanov 	return 0;
680ef64a54fSPavel Emelyanov }
681ef64a54fSPavel Emelyanov 
sk_peek_offset_bwd(struct sock * sk,int val)682ef64a54fSPavel Emelyanov static inline void sk_peek_offset_bwd(struct sock *sk, int val)
683ef64a54fSPavel Emelyanov {
684b9bb53f3SWillem de Bruijn 	s32 off = READ_ONCE(sk->sk_peek_off);
685b9bb53f3SWillem de Bruijn 
686b9bb53f3SWillem de Bruijn 	if (unlikely(off >= 0)) {
687b9bb53f3SWillem de Bruijn 		off = max_t(s32, off - val, 0);
688b9bb53f3SWillem de Bruijn 		WRITE_ONCE(sk->sk_peek_off, off);
689ef64a54fSPavel Emelyanov 	}
690ef64a54fSPavel Emelyanov }
691ef64a54fSPavel Emelyanov 
sk_peek_offset_fwd(struct sock * sk,int val)692ef64a54fSPavel Emelyanov static inline void sk_peek_offset_fwd(struct sock *sk, int val)
693ef64a54fSPavel Emelyanov {
694b9bb53f3SWillem de Bruijn 	sk_peek_offset_bwd(sk, -val);
695ef64a54fSPavel Emelyanov }
696ef64a54fSPavel Emelyanov 
6971da177e4SLinus Torvalds /*
6981da177e4SLinus Torvalds  * Hashed lists helper routines
6991da177e4SLinus Torvalds  */
sk_entry(const struct hlist_node * node)700c4146644SLi Zefan static inline struct sock *sk_entry(const struct hlist_node *node)
701c4146644SLi Zefan {
702c4146644SLi Zefan 	return hlist_entry(node, struct sock, sk_node);
703c4146644SLi Zefan }
704c4146644SLi Zefan 
__sk_head(const struct hlist_head * head)705e48c414eSArnaldo Carvalho de Melo static inline struct sock *__sk_head(const struct hlist_head *head)
7061da177e4SLinus Torvalds {
7071da177e4SLinus Torvalds 	return hlist_entry(head->first, struct sock, sk_node);
7081da177e4SLinus Torvalds }
7091da177e4SLinus Torvalds 
sk_head(const struct hlist_head * head)710e48c414eSArnaldo Carvalho de Melo static inline struct sock *sk_head(const struct hlist_head *head)
7111da177e4SLinus Torvalds {
7121da177e4SLinus Torvalds 	return hlist_empty(head) ? NULL : __sk_head(head);
7131da177e4SLinus Torvalds }
7141da177e4SLinus Torvalds 
__sk_nulls_head(const struct hlist_nulls_head * head)71588ab1932SEric Dumazet static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
71688ab1932SEric Dumazet {
71788ab1932SEric Dumazet 	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
71888ab1932SEric Dumazet }
71988ab1932SEric Dumazet 
sk_nulls_head(const struct hlist_nulls_head * head)72088ab1932SEric Dumazet static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
72188ab1932SEric Dumazet {
72288ab1932SEric Dumazet 	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
72388ab1932SEric Dumazet }
72488ab1932SEric Dumazet 
sk_next(const struct sock * sk)725e48c414eSArnaldo Carvalho de Melo static inline struct sock *sk_next(const struct sock *sk)
7261da177e4SLinus Torvalds {
7276c59ebd3SGeliang Tang 	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
7281da177e4SLinus Torvalds }
7291da177e4SLinus Torvalds 
sk_nulls_next(const struct sock * sk)73088ab1932SEric Dumazet static inline struct sock *sk_nulls_next(const struct sock *sk)
73188ab1932SEric Dumazet {
73288ab1932SEric Dumazet 	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
73388ab1932SEric Dumazet 		hlist_nulls_entry(sk->sk_nulls_node.next,
73488ab1932SEric Dumazet 				  struct sock, sk_nulls_node) :
73588ab1932SEric Dumazet 		NULL;
73688ab1932SEric Dumazet }
73788ab1932SEric Dumazet 
sk_unhashed(const struct sock * sk)738dc6b9b78SEric Dumazet static inline bool sk_unhashed(const struct sock *sk)
7391da177e4SLinus Torvalds {
7401da177e4SLinus Torvalds 	return hlist_unhashed(&sk->sk_node);
7411da177e4SLinus Torvalds }
7421da177e4SLinus Torvalds 
sk_hashed(const struct sock * sk)743dc6b9b78SEric Dumazet static inline bool sk_hashed(const struct sock *sk)
7441da177e4SLinus Torvalds {
745da753beaSAkinobu Mita 	return !sk_unhashed(sk);
7461da177e4SLinus Torvalds }
7471da177e4SLinus Torvalds 
sk_node_init(struct hlist_node * node)748dc6b9b78SEric Dumazet static inline void sk_node_init(struct hlist_node *node)
7491da177e4SLinus Torvalds {
7501da177e4SLinus Torvalds 	node->pprev = NULL;
7511da177e4SLinus Torvalds }
7521da177e4SLinus Torvalds 
__sk_del_node(struct sock * sk)753dc6b9b78SEric Dumazet static inline void __sk_del_node(struct sock *sk)
7541da177e4SLinus Torvalds {
7551da177e4SLinus Torvalds 	__hlist_del(&sk->sk_node);
7561da177e4SLinus Torvalds }
7571da177e4SLinus Torvalds 
758808f5114Sstephen hemminger /* NB: equivalent to hlist_del_init_rcu */
__sk_del_node_init(struct sock * sk)759dc6b9b78SEric Dumazet static inline bool __sk_del_node_init(struct sock *sk)
7601da177e4SLinus Torvalds {
7611da177e4SLinus Torvalds 	if (sk_hashed(sk)) {
7621da177e4SLinus Torvalds 		__sk_del_node(sk);
7631da177e4SLinus Torvalds 		sk_node_init(&sk->sk_node);
764dc6b9b78SEric Dumazet 		return true;
7651da177e4SLinus Torvalds 	}
766dc6b9b78SEric Dumazet 	return false;
7671da177e4SLinus Torvalds }
7681da177e4SLinus Torvalds 
7691da177e4SLinus Torvalds /* Grab socket reference count. This operation is valid only
7701da177e4SLinus Torvalds    when sk is ALREADY grabbed f.e. it is found in hash table
7711da177e4SLinus Torvalds    or a list and the lookup is made under lock preventing hash table
7721da177e4SLinus Torvalds    modifications.
7731da177e4SLinus Torvalds  */
7741da177e4SLinus Torvalds 
sock_hold(struct sock * sk)775f9a7cbbfSDenys Vlasenko static __always_inline void sock_hold(struct sock *sk)
7761da177e4SLinus Torvalds {
77741c6d650SReshetova, Elena 	refcount_inc(&sk->sk_refcnt);
7781da177e4SLinus Torvalds }
7791da177e4SLinus Torvalds 
7801da177e4SLinus Torvalds /* Ungrab socket in the context, which assumes that socket refcnt
7811da177e4SLinus Torvalds    cannot hit zero, f.e. it is true in context of any socketcall.
7821da177e4SLinus Torvalds  */
__sock_put(struct sock * sk)783f9a7cbbfSDenys Vlasenko static __always_inline void __sock_put(struct sock *sk)
7841da177e4SLinus Torvalds {
78541c6d650SReshetova, Elena 	refcount_dec(&sk->sk_refcnt);
7861da177e4SLinus Torvalds }
7871da177e4SLinus Torvalds 
sk_del_node_init(struct sock * sk)788dc6b9b78SEric Dumazet static inline bool sk_del_node_init(struct sock *sk)
7891da177e4SLinus Torvalds {
790dc6b9b78SEric Dumazet 	bool rc = __sk_del_node_init(sk);
7911da177e4SLinus Torvalds 
7921da177e4SLinus Torvalds 	if (rc) {
7931da177e4SLinus Torvalds 		/* paranoid for a while -acme */
79441c6d650SReshetova, Elena 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
7951da177e4SLinus Torvalds 		__sock_put(sk);
7961da177e4SLinus Torvalds 	}
7971da177e4SLinus Torvalds 	return rc;
7981da177e4SLinus Torvalds }
799808f5114Sstephen hemminger #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
8001da177e4SLinus Torvalds 
__sk_nulls_del_node_init_rcu(struct sock * sk)801dc6b9b78SEric Dumazet static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
802271b72c7SEric Dumazet {
803271b72c7SEric Dumazet 	if (sk_hashed(sk)) {
80488ab1932SEric Dumazet 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
805dc6b9b78SEric Dumazet 		return true;
806271b72c7SEric Dumazet 	}
807dc6b9b78SEric Dumazet 	return false;
808271b72c7SEric Dumazet }
809271b72c7SEric Dumazet 
sk_nulls_del_node_init_rcu(struct sock * sk)810dc6b9b78SEric Dumazet static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
811271b72c7SEric Dumazet {
812dc6b9b78SEric Dumazet 	bool rc = __sk_nulls_del_node_init_rcu(sk);
813271b72c7SEric Dumazet 
814271b72c7SEric Dumazet 	if (rc) {
815271b72c7SEric Dumazet 		/* paranoid for a while -acme */
81641c6d650SReshetova, Elena 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
817271b72c7SEric Dumazet 		__sock_put(sk);
818271b72c7SEric Dumazet 	}
819271b72c7SEric Dumazet 	return rc;
820271b72c7SEric Dumazet }
821271b72c7SEric Dumazet 
__sk_add_node(struct sock * sk,struct hlist_head * list)822dc6b9b78SEric Dumazet static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
8231da177e4SLinus Torvalds {
8241da177e4SLinus Torvalds 	hlist_add_head(&sk->sk_node, list);
8251da177e4SLinus Torvalds }
8261da177e4SLinus Torvalds 
sk_add_node(struct sock * sk,struct hlist_head * list)827dc6b9b78SEric Dumazet static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
8281da177e4SLinus Torvalds {
8291da177e4SLinus Torvalds 	sock_hold(sk);
8301da177e4SLinus Torvalds 	__sk_add_node(sk, list);
8311da177e4SLinus Torvalds }
8321da177e4SLinus Torvalds 
sk_add_node_rcu(struct sock * sk,struct hlist_head * list)833dc6b9b78SEric Dumazet static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
834808f5114Sstephen hemminger {
835808f5114Sstephen hemminger 	sock_hold(sk);
836d296ba60SCraig Gallek 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
837d296ba60SCraig Gallek 	    sk->sk_family == AF_INET6)
838d296ba60SCraig Gallek 		hlist_add_tail_rcu(&sk->sk_node, list);
839d296ba60SCraig Gallek 	else
840808f5114Sstephen hemminger 		hlist_add_head_rcu(&sk->sk_node, list);
841808f5114Sstephen hemminger }
842808f5114Sstephen hemminger 
sk_add_node_tail_rcu(struct sock * sk,struct hlist_head * list)843a4dc6a49SMaxime Chevallier static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
844a4dc6a49SMaxime Chevallier {
845a4dc6a49SMaxime Chevallier 	sock_hold(sk);
846a4dc6a49SMaxime Chevallier 	hlist_add_tail_rcu(&sk->sk_node, list);
847a4dc6a49SMaxime Chevallier }
848a4dc6a49SMaxime Chevallier 
__sk_nulls_add_node_rcu(struct sock * sk,struct hlist_nulls_head * list)849dc6b9b78SEric Dumazet static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
850271b72c7SEric Dumazet {
85188ab1932SEric Dumazet 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
852271b72c7SEric Dumazet }
853271b72c7SEric Dumazet 
__sk_nulls_add_node_tail_rcu(struct sock * sk,struct hlist_nulls_head * list)8548dbd76e7SEric Dumazet static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
8558dbd76e7SEric Dumazet {
8568dbd76e7SEric Dumazet 	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
8578dbd76e7SEric Dumazet }
8588dbd76e7SEric Dumazet 
sk_nulls_add_node_rcu(struct sock * sk,struct hlist_nulls_head * list)859dc6b9b78SEric Dumazet static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
860271b72c7SEric Dumazet {
861271b72c7SEric Dumazet 	sock_hold(sk);
86288ab1932SEric Dumazet 	__sk_nulls_add_node_rcu(sk, list);
863271b72c7SEric Dumazet }
864271b72c7SEric Dumazet 
__sk_del_bind_node(struct sock * sk)865dc6b9b78SEric Dumazet static inline void __sk_del_bind_node(struct sock *sk)
8661da177e4SLinus Torvalds {
8671da177e4SLinus Torvalds 	__hlist_del(&sk->sk_bind_node);
8681da177e4SLinus Torvalds }
8691da177e4SLinus Torvalds 
sk_add_bind_node(struct sock * sk,struct hlist_head * list)870dc6b9b78SEric Dumazet static inline void sk_add_bind_node(struct sock *sk,
8711da177e4SLinus Torvalds 					struct hlist_head *list)
8721da177e4SLinus Torvalds {
8731da177e4SLinus Torvalds 	hlist_add_head(&sk->sk_bind_node, list);
8741da177e4SLinus Torvalds }
8751da177e4SLinus Torvalds 
__sk_del_bind2_node(struct sock * sk)87628044fc1SJoanne Koong static inline void __sk_del_bind2_node(struct sock *sk)
87728044fc1SJoanne Koong {
87828044fc1SJoanne Koong 	__hlist_del(&sk->sk_bind2_node);
87928044fc1SJoanne Koong }
88028044fc1SJoanne Koong 
sk_add_bind2_node(struct sock * sk,struct hlist_head * list)88128044fc1SJoanne Koong static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
88228044fc1SJoanne Koong {
88328044fc1SJoanne Koong 	hlist_add_head(&sk->sk_bind2_node, list);
88428044fc1SJoanne Koong }
88528044fc1SJoanne Koong 
886b67bfe0dSSasha Levin #define sk_for_each(__sk, list) \
887b67bfe0dSSasha Levin 	hlist_for_each_entry(__sk, list, sk_node)
888b67bfe0dSSasha Levin #define sk_for_each_rcu(__sk, list) \
889b67bfe0dSSasha Levin 	hlist_for_each_entry_rcu(__sk, list, sk_node)
89088ab1932SEric Dumazet #define sk_nulls_for_each(__sk, node, list) \
89188ab1932SEric Dumazet 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
89288ab1932SEric Dumazet #define sk_nulls_for_each_rcu(__sk, node, list) \
89388ab1932SEric Dumazet 	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
894b67bfe0dSSasha Levin #define sk_for_each_from(__sk) \
895b67bfe0dSSasha Levin 	hlist_for_each_entry_from(__sk, sk_node)
89688ab1932SEric Dumazet #define sk_nulls_for_each_from(__sk, node) \
89788ab1932SEric Dumazet 	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
89888ab1932SEric Dumazet 		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
899b67bfe0dSSasha Levin #define sk_for_each_safe(__sk, tmp, list) \
900b67bfe0dSSasha Levin 	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
901b67bfe0dSSasha Levin #define sk_for_each_bound(__sk, list) \
902b67bfe0dSSasha Levin 	hlist_for_each_entry(__sk, list, sk_bind_node)
90328044fc1SJoanne Koong #define sk_for_each_bound_bhash2(__sk, list) \
90428044fc1SJoanne Koong 	hlist_for_each_entry(__sk, list, sk_bind2_node)
9053be342e0SAnastasia Kovaleva #define sk_for_each_bound_safe(__sk, tmp, list) \
9063be342e0SAnastasia Kovaleva 	hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node)
9071da177e4SLinus Torvalds 
9082dc41cffSDavid Held /**
909ca065d0cSEric Dumazet  * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
9102dc41cffSDavid Held  * @tpos:	the type * to use as a loop cursor.
9112dc41cffSDavid Held  * @pos:	the &struct hlist_node to use as a loop cursor.
9122dc41cffSDavid Held  * @head:	the head for your list.
9132dc41cffSDavid Held  * @offset:	offset of hlist_node within the struct.
9142dc41cffSDavid Held  *
9152dc41cffSDavid Held  */
916ca065d0cSEric Dumazet #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
917b6f4f848STim Hansen 	for (pos = rcu_dereference(hlist_first_rcu(head));		       \
918ca065d0cSEric Dumazet 	     pos != NULL &&						       \
9192dc41cffSDavid Held 		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
920b6f4f848STim Hansen 	     pos = rcu_dereference(hlist_next_rcu(pos)))
9212dc41cffSDavid Held 
sk_user_ns(const struct sock * sk)922a84a434bSPeter Lafreniere static inline struct user_namespace *sk_user_ns(const struct sock *sk)
923c336d148SEric W. Biederman {
924c336d148SEric W. Biederman 	/* Careful only use this in a context where these parameters
925c336d148SEric W. Biederman 	 * can not change and must all be valid, such as recvmsg from
926c336d148SEric W. Biederman 	 * userspace.
927c336d148SEric W. Biederman 	 */
928c336d148SEric W. Biederman 	return sk->sk_socket->file->f_cred->user_ns;
929c336d148SEric W. Biederman }
930c336d148SEric W. Biederman 
9311da177e4SLinus Torvalds /* Sock flags */
9321da177e4SLinus Torvalds enum sock_flags {
9331da177e4SLinus Torvalds 	SOCK_DEAD,
9341da177e4SLinus Torvalds 	SOCK_DONE,
9351da177e4SLinus Torvalds 	SOCK_URGINLINE,
9361da177e4SLinus Torvalds 	SOCK_KEEPOPEN,
9371da177e4SLinus Torvalds 	SOCK_LINGER,
9381da177e4SLinus Torvalds 	SOCK_DESTROY,
9391da177e4SLinus Torvalds 	SOCK_BROADCAST,
9401da177e4SLinus Torvalds 	SOCK_TIMESTAMP,
9411da177e4SLinus Torvalds 	SOCK_ZAPPED,
9421da177e4SLinus Torvalds 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
9431da177e4SLinus Torvalds 	SOCK_DBG, /* %SO_DEBUG setting */
9441da177e4SLinus Torvalds 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
94592f37fd2SEric Dumazet 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
9461da177e4SLinus Torvalds 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
9477cb02404SMel Gorman 	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
94820d49473SPatrick Ohly 	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
949bcdce719SEric Dumazet 	SOCK_FASYNC, /* fasync() active */
9503b885787SNeil Horman 	SOCK_RXQ_OVFL,
9511cdebb42SShirley Ma 	SOCK_ZEROCOPY, /* buffers from userspace */
9526e3e939fSJohannes Berg 	SOCK_WIFI_STATUS, /* push wifi status to userspace */
9533bdc0ebaSBen Greear 	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
9543bdc0ebaSBen Greear 		     * Will use last 4 bytes of packet sent from
9553bdc0ebaSBen Greear 		     * user-space instead.
9563bdc0ebaSBen Greear 		     */
957d59577b6SVincent Bernat 	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
9587d4c04fcSKeller, Jacob E 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
959a4298e45SEric Dumazet 	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
96080b14deeSRichard Cochran 	SOCK_TXTIME,
961e4a2a304SJason Wang 	SOCK_XDP, /* XDP is attached */
962887feae3SDeepa Dinamani 	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
9636fd1d51cSErin MacNeil 	SOCK_RCVMARK, /* Receive SO_MARK  ancillary data with packet */
9641da177e4SLinus Torvalds };
9651da177e4SLinus Torvalds 
96601ce63c9SMarcelo Ricardo Leitner #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
96701ce63c9SMarcelo Ricardo Leitner 
sock_copy_flags(struct sock * nsk,const struct sock * osk)968a84a434bSPeter Lafreniere static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
96953b924b3SRalf Baechle {
97053b924b3SRalf Baechle 	nsk->sk_flags = osk->sk_flags;
97153b924b3SRalf Baechle }
97253b924b3SRalf Baechle 
sock_set_flag(struct sock * sk,enum sock_flags flag)9731da177e4SLinus Torvalds static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
9741da177e4SLinus Torvalds {
9751da177e4SLinus Torvalds 	__set_bit(flag, &sk->sk_flags);
9761da177e4SLinus Torvalds }
9771da177e4SLinus Torvalds 
sock_reset_flag(struct sock * sk,enum sock_flags flag)9781da177e4SLinus Torvalds static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
9791da177e4SLinus Torvalds {
9801da177e4SLinus Torvalds 	__clear_bit(flag, &sk->sk_flags);
9811da177e4SLinus Torvalds }
9821da177e4SLinus Torvalds 
sock_valbool_flag(struct sock * sk,enum sock_flags bit,int valbool)983dfde1d7dSDmitry Yakunin static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
984dfde1d7dSDmitry Yakunin 				     int valbool)
985dfde1d7dSDmitry Yakunin {
986dfde1d7dSDmitry Yakunin 	if (valbool)
987dfde1d7dSDmitry Yakunin 		sock_set_flag(sk, bit);
988dfde1d7dSDmitry Yakunin 	else
989dfde1d7dSDmitry Yakunin 		sock_reset_flag(sk, bit);
990dfde1d7dSDmitry Yakunin }
991dfde1d7dSDmitry Yakunin 
sock_flag(const struct sock * sk,enum sock_flags flag)9921b23a5dfSEric Dumazet static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
9931da177e4SLinus Torvalds {
9941da177e4SLinus Torvalds 	return test_bit(flag, &sk->sk_flags);
9951da177e4SLinus Torvalds }
9961da177e4SLinus Torvalds 
997c93bdd0eSMel Gorman #ifdef CONFIG_NET
998a7950ae8SDavidlohr Bueso DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
sk_memalloc_socks(void)999c93bdd0eSMel Gorman static inline int sk_memalloc_socks(void)
1000c93bdd0eSMel Gorman {
1001a7950ae8SDavidlohr Bueso 	return static_branch_unlikely(&memalloc_socks_key);
1002c93bdd0eSMel Gorman }
1003d9539752SKees Cook 
1004d9539752SKees Cook void __receive_sock(struct file *file);
1005c93bdd0eSMel Gorman #else
1006c93bdd0eSMel Gorman 
sk_memalloc_socks(void)1007c93bdd0eSMel Gorman static inline int sk_memalloc_socks(void)
1008c93bdd0eSMel Gorman {
1009c93bdd0eSMel Gorman 	return 0;
1010c93bdd0eSMel Gorman }
1011c93bdd0eSMel Gorman 
__receive_sock(struct file * file)1012d9539752SKees Cook static inline void __receive_sock(struct file *file)
1013d9539752SKees Cook { }
1014c93bdd0eSMel Gorman #endif
1015c93bdd0eSMel Gorman 
sk_gfp_mask(const struct sock * sk,gfp_t gfp_mask)10167450aaf6SEric Dumazet static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
101799a1dec7SMel Gorman {
10187450aaf6SEric Dumazet 	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
101999a1dec7SMel Gorman }
102099a1dec7SMel Gorman 
sk_acceptq_removed(struct sock * sk)10211da177e4SLinus Torvalds static inline void sk_acceptq_removed(struct sock *sk)
10221da177e4SLinus Torvalds {
1023288efe86SEric Dumazet 	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
10241da177e4SLinus Torvalds }
10251da177e4SLinus Torvalds 
sk_acceptq_added(struct sock * sk)10261da177e4SLinus Torvalds static inline void sk_acceptq_added(struct sock *sk)
10271da177e4SLinus Torvalds {
1028288efe86SEric Dumazet 	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
10291da177e4SLinus Torvalds }
10301da177e4SLinus Torvalds 
1031c609e6aaSEric Dumazet /* Note: If you think the test should be:
1032c609e6aaSEric Dumazet  *	return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
1033c609e6aaSEric Dumazet  * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
1034c609e6aaSEric Dumazet  */
sk_acceptq_is_full(const struct sock * sk)1035dc6b9b78SEric Dumazet static inline bool sk_acceptq_is_full(const struct sock *sk)
10361da177e4SLinus Torvalds {
1037c609e6aaSEric Dumazet 	return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
10381da177e4SLinus Torvalds }
10391da177e4SLinus Torvalds 
10401da177e4SLinus Torvalds /*
10411da177e4SLinus Torvalds  * Compute minimal free write space needed to queue new packets.
10421da177e4SLinus Torvalds  */
sk_stream_min_wspace(const struct sock * sk)1043dc6b9b78SEric Dumazet static inline int sk_stream_min_wspace(const struct sock *sk)
10441da177e4SLinus Torvalds {
1045ab4e846aSEric Dumazet 	return READ_ONCE(sk->sk_wmem_queued) >> 1;
10461da177e4SLinus Torvalds }
10471da177e4SLinus Torvalds 
sk_stream_wspace(const struct sock * sk)1048dc6b9b78SEric Dumazet static inline int sk_stream_wspace(const struct sock *sk)
10491da177e4SLinus Torvalds {
1050ab4e846aSEric Dumazet 	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
1051ab4e846aSEric Dumazet }
1052ab4e846aSEric Dumazet 
sk_wmem_queued_add(struct sock * sk,int val)1053ab4e846aSEric Dumazet static inline void sk_wmem_queued_add(struct sock *sk, int val)
1054ab4e846aSEric Dumazet {
1055ab4e846aSEric Dumazet 	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
10561da177e4SLinus Torvalds }
10571da177e4SLinus Torvalds 
sk_forward_alloc_add(struct sock * sk,int val)10585e6300e7SEric Dumazet static inline void sk_forward_alloc_add(struct sock *sk, int val)
10595e6300e7SEric Dumazet {
10605e6300e7SEric Dumazet 	/* Paired with lockless reads of sk->sk_forward_alloc */
10615e6300e7SEric Dumazet 	WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
10625e6300e7SEric Dumazet }
10635e6300e7SEric Dumazet 
106469336bd2SJoe Perches void sk_stream_write_space(struct sock *sk);
10651da177e4SLinus Torvalds 
10668eae939fSZhu Yi /* OOB backlog add */
__sk_add_backlog(struct sock * sk,struct sk_buff * skb)1067a3a858ffSZhu Yi static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
10689ee6b535SStephen Hemminger {
10697fee226aSEric Dumazet 	/* dont let skb dst not refcounted, we are going to leave rcu lock */
1070222d7dbdSEric Dumazet 	skb_dst_force(skb);
10717fee226aSEric Dumazet 
10727fee226aSEric Dumazet 	if (!sk->sk_backlog.tail)
10739ed498c6SEric Dumazet 		WRITE_ONCE(sk->sk_backlog.head, skb);
10747fee226aSEric Dumazet 	else
10759ee6b535SStephen Hemminger 		sk->sk_backlog.tail->next = skb;
10767fee226aSEric Dumazet 
10779ed498c6SEric Dumazet 	WRITE_ONCE(sk->sk_backlog.tail, skb);
10789ee6b535SStephen Hemminger 	skb->next = NULL;
10799ee6b535SStephen Hemminger }
10801da177e4SLinus Torvalds 
1081c377411fSEric Dumazet /*
1082c377411fSEric Dumazet  * Take into account size of receive queue and backlog queue
10830fd7bac6SEric Dumazet  * Do not take into account this skb truesize,
10840fd7bac6SEric Dumazet  * to allow even a single big packet to come.
1085c377411fSEric Dumazet  */
sk_rcvqueues_full(const struct sock * sk,unsigned int limit)1086274f482dSSorin Dumitru static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
1087c377411fSEric Dumazet {
1088c377411fSEric Dumazet 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
1089c377411fSEric Dumazet 
1090f545a38fSEric Dumazet 	return qsize > limit;
1091c377411fSEric Dumazet }
1092c377411fSEric Dumazet 
10938eae939fSZhu Yi /* The per-socket spinlock must be held here. */
sk_add_backlog(struct sock * sk,struct sk_buff * skb,unsigned int limit)1094f545a38fSEric Dumazet static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
1095f545a38fSEric Dumazet 					      unsigned int limit)
10968eae939fSZhu Yi {
1097274f482dSSorin Dumitru 	if (sk_rcvqueues_full(sk, limit))
10988eae939fSZhu Yi 		return -ENOBUFS;
10998eae939fSZhu Yi 
1100c7c49b8fSEric Dumazet 	/*
1101c7c49b8fSEric Dumazet 	 * If the skb was allocated from pfmemalloc reserves, only
1102c7c49b8fSEric Dumazet 	 * allow SOCK_MEMALLOC sockets to use it as this socket is
1103c7c49b8fSEric Dumazet 	 * helping free memory
1104c7c49b8fSEric Dumazet 	 */
1105c7c49b8fSEric Dumazet 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1106c7c49b8fSEric Dumazet 		return -ENOMEM;
1107c7c49b8fSEric Dumazet 
1108a3a858ffSZhu Yi 	__sk_add_backlog(sk, skb);
11098eae939fSZhu Yi 	sk->sk_backlog.len += skb->truesize;
11108eae939fSZhu Yi 	return 0;
11118eae939fSZhu Yi }
11128eae939fSZhu Yi 
111369336bd2SJoe Perches int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1114b4b9e355SMel Gorman 
1115d2489c7bSEric Dumazet INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
1116d2489c7bSEric Dumazet INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
1117d2489c7bSEric Dumazet 
sk_backlog_rcv(struct sock * sk,struct sk_buff * skb)1118c57943a1SPeter Zijlstra static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1119c57943a1SPeter Zijlstra {
1120b4b9e355SMel Gorman 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
1121b4b9e355SMel Gorman 		return __sk_backlog_rcv(sk, skb);
1122b4b9e355SMel Gorman 
1123d2489c7bSEric Dumazet 	return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
1124d2489c7bSEric Dumazet 				  tcp_v6_do_rcv,
1125d2489c7bSEric Dumazet 				  tcp_v4_do_rcv,
1126d2489c7bSEric Dumazet 				  sk, skb);
1127c57943a1SPeter Zijlstra }
1128c57943a1SPeter Zijlstra 
sk_incoming_cpu_update(struct sock * sk)11292c8c56e1SEric Dumazet static inline void sk_incoming_cpu_update(struct sock *sk)
11302c8c56e1SEric Dumazet {
113134cfb542SPaolo Abeni 	int cpu = raw_smp_processor_id();
113234cfb542SPaolo Abeni 
11337170a977SEric Dumazet 	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
11347170a977SEric Dumazet 		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
11352c8c56e1SEric Dumazet }
11362c8c56e1SEric Dumazet 
sock_rps_record_flow_hash(__u32 hash)1137fe477558STom Herbert static inline void sock_rps_record_flow_hash(__u32 hash)
1138c58dc01bSDavid S. Miller {
1139c58dc01bSDavid S. Miller #ifdef CONFIG_RPS
1140c58dc01bSDavid S. Miller 	struct rps_sock_flow_table *sock_flow_table;
1141c58dc01bSDavid S. Miller 
1142c58dc01bSDavid S. Miller 	rcu_read_lock();
1143c58dc01bSDavid S. Miller 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
1144fe477558STom Herbert 	rps_record_sock_flow(sock_flow_table, hash);
1145c58dc01bSDavid S. Miller 	rcu_read_unlock();
1146c58dc01bSDavid S. Miller #endif
1147c58dc01bSDavid S. Miller }
1148c58dc01bSDavid S. Miller 
sock_rps_record_flow(const struct sock * sk)1149fe477558STom Herbert static inline void sock_rps_record_flow(const struct sock *sk)
1150fe477558STom Herbert {
1151c9d8ca04SZhi Yong Wu #ifdef CONFIG_RPS
1152dc05360fSEric Dumazet 	if (static_branch_unlikely(&rfs_needed)) {
115313bfff25SEric Dumazet 		/* Reading sk->sk_rxhash might incur an expensive cache line
115413bfff25SEric Dumazet 		 * miss.
11555b8e2f61SEric Dumazet 		 *
11565b8e2f61SEric Dumazet 		 * TCP_ESTABLISHED does cover almost all states where RFS
11575b8e2f61SEric Dumazet 		 * might be useful, and is cheaper [1] than testing :
11585b8e2f61SEric Dumazet 		 *	IPv4: inet_sk(sk)->inet_daddr
11595b8e2f61SEric Dumazet 		 * 	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
11605b8e2f61SEric Dumazet 		 * OR	an additional socket flag
11615b8e2f61SEric Dumazet 		 * [1] : sk_state and sk_prot are in the same cache line.
11625b8e2f61SEric Dumazet 		 */
11631e5c647cSEric Dumazet 		if (sk->sk_state == TCP_ESTABLISHED) {
11641e5c647cSEric Dumazet 			/* This READ_ONCE() is paired with the WRITE_ONCE()
11651e5c647cSEric Dumazet 			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
11661e5c647cSEric Dumazet 			 */
11671e5c647cSEric Dumazet 			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
11681e5c647cSEric Dumazet 		}
116913bfff25SEric Dumazet 	}
1170c9d8ca04SZhi Yong Wu #endif
1171fe477558STom Herbert }
1172fe477558STom Herbert 
sock_rps_save_rxhash(struct sock * sk,const struct sk_buff * skb)1173bdeab991STom Herbert static inline void sock_rps_save_rxhash(struct sock *sk,
1174bdeab991STom Herbert 					const struct sk_buff *skb)
1175c58dc01bSDavid S. Miller {
1176c58dc01bSDavid S. Miller #ifdef CONFIG_RPS
11771e5c647cSEric Dumazet 	/* The following WRITE_ONCE() is paired with the READ_ONCE()
11781e5c647cSEric Dumazet 	 * here, and another one in sock_rps_record_flow().
11791e5c647cSEric Dumazet 	 */
11801e5c647cSEric Dumazet 	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
11811e5c647cSEric Dumazet 		WRITE_ONCE(sk->sk_rxhash, skb->hash);
1182c58dc01bSDavid S. Miller #endif
1183c58dc01bSDavid S. Miller }
1184c58dc01bSDavid S. Miller 
sock_rps_reset_rxhash(struct sock * sk)1185bdeab991STom Herbert static inline void sock_rps_reset_rxhash(struct sock *sk)
1186bdeab991STom Herbert {
1187bdeab991STom Herbert #ifdef CONFIG_RPS
11881e5c647cSEric Dumazet 	/* Paired with READ_ONCE() in sock_rps_record_flow() */
11891e5c647cSEric Dumazet 	WRITE_ONCE(sk->sk_rxhash, 0);
1190bdeab991STom Herbert #endif
1191bdeab991STom Herbert }
1192bdeab991STom Herbert 
1193d9dc8b0fSWANG Cong #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
1194419ce133SPaolo Abeni 	({	int __rc, __dis = __sk->sk_disconnects;			\
11951da177e4SLinus Torvalds 		release_sock(__sk);					\
1196cfcabdccSStephen Hemminger 		__rc = __condition;					\
1197cfcabdccSStephen Hemminger 		if (!__rc) {						\
1198d9dc8b0fSWANG Cong 			*(__timeo) = wait_woken(__wait,			\
1199d9dc8b0fSWANG Cong 						TASK_INTERRUPTIBLE,	\
1200d9dc8b0fSWANG Cong 						*(__timeo));		\
12011da177e4SLinus Torvalds 		}							\
120226cabd31SPeter Zijlstra 		sched_annotate_sleep();					\
12031da177e4SLinus Torvalds 		lock_sock(__sk);					\
1204419ce133SPaolo Abeni 		__rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
1205cfcabdccSStephen Hemminger 		__rc;							\
12061da177e4SLinus Torvalds 	})
12071da177e4SLinus Torvalds 
120869336bd2SJoe Perches int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
120969336bd2SJoe Perches int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
121069336bd2SJoe Perches void sk_stream_wait_close(struct sock *sk, long timeo_p);
121169336bd2SJoe Perches int sk_stream_error(struct sock *sk, int flags, int err);
121269336bd2SJoe Perches void sk_stream_kill_queues(struct sock *sk);
121369336bd2SJoe Perches void sk_set_memalloc(struct sock *sk);
121469336bd2SJoe Perches void sk_clear_memalloc(struct sock *sk);
12151da177e4SLinus Torvalds 
1216d41a69f1SEric Dumazet void __sk_flush_backlog(struct sock *sk);
1217d41a69f1SEric Dumazet 
sk_flush_backlog(struct sock * sk)1218d41a69f1SEric Dumazet static inline bool sk_flush_backlog(struct sock *sk)
1219d41a69f1SEric Dumazet {
1220d41a69f1SEric Dumazet 	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1221d41a69f1SEric Dumazet 		__sk_flush_backlog(sk);
1222d41a69f1SEric Dumazet 		return true;
1223d41a69f1SEric Dumazet 	}
1224d41a69f1SEric Dumazet 	return false;
1225d41a69f1SEric Dumazet }
1226d41a69f1SEric Dumazet 
1227dfbafc99SSabrina Dubroca int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
12281da177e4SLinus Torvalds 
122960236fddSArnaldo Carvalho de Melo struct request_sock_ops;
12306d6ee43eSArnaldo Carvalho de Melo struct timewait_sock_ops;
1231ab1e0a13SArnaldo Carvalho de Melo struct inet_hashinfo;
1232fc8717baSPavel Emelyanov struct raw_hashinfo;
1233f16a7dd5SUrsula Braun struct smc_hashinfo;
1234de477254SPaul Gortmaker struct module;
123551e0158aSCong Wang struct sk_psock;
12362e6599cbSArnaldo Carvalho de Melo 
1237f77d6021SEric Dumazet /*
12385f0d5a3aSPaul E. McKenney  * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
1239f77d6021SEric Dumazet  * un-modified. Special care is taken when initializing object to zero.
1240f77d6021SEric Dumazet  */
sk_prot_clear_nulls(struct sock * sk,int size)1241f77d6021SEric Dumazet static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1242f77d6021SEric Dumazet {
1243f77d6021SEric Dumazet 	if (offsetof(struct sock, sk_node.next) != 0)
1244f77d6021SEric Dumazet 		memset(sk, 0, offsetof(struct sock, sk_node.next));
1245f77d6021SEric Dumazet 	memset(&sk->sk_node.pprev, 0,
1246f77d6021SEric Dumazet 	       size - offsetof(struct sock, sk_node.pprev));
1247f77d6021SEric Dumazet }
1248f77d6021SEric Dumazet 
12491da177e4SLinus Torvalds /* Networking protocol blocks we attach to sockets.
12501da177e4SLinus Torvalds  * socket layer -> transport layer interface
12511da177e4SLinus Torvalds  */
12521da177e4SLinus Torvalds struct proto {
12531da177e4SLinus Torvalds 	void			(*close)(struct sock *sk,
12541da177e4SLinus Torvalds 					long timeout);
1255d74bad4eSAndrey Ignatov 	int			(*pre_connect)(struct sock *sk,
1256d74bad4eSAndrey Ignatov 					struct sockaddr *uaddr,
1257d74bad4eSAndrey Ignatov 					int addr_len);
12581da177e4SLinus Torvalds 	int			(*connect)(struct sock *sk,
12591da177e4SLinus Torvalds 					struct sockaddr *uaddr,
12601da177e4SLinus Torvalds 					int addr_len);
12611da177e4SLinus Torvalds 	int			(*disconnect)(struct sock *sk, int flags);
12621da177e4SLinus Torvalds 
1263cdfbabfbSDavid Howells 	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
1264cdfbabfbSDavid Howells 					  bool kern);
12651da177e4SLinus Torvalds 
12661da177e4SLinus Torvalds 	int			(*ioctl)(struct sock *sk, int cmd,
1267e1d001faSBreno Leitao 					 int *karg);
12681da177e4SLinus Torvalds 	int			(*init)(struct sock *sk);
12697d06b2e0SBrian Haley 	void			(*destroy)(struct sock *sk);
12701da177e4SLinus Torvalds 	void			(*shutdown)(struct sock *sk, int how);
12711da177e4SLinus Torvalds 	int			(*setsockopt)(struct sock *sk, int level,
1272a7b75c5aSChristoph Hellwig 					int optname, sockptr_t optval,
1273b7058842SDavid S. Miller 					unsigned int optlen);
12741da177e4SLinus Torvalds 	int			(*getsockopt)(struct sock *sk, int level,
12751da177e4SLinus Torvalds 					int optname, char __user *optval,
12761da177e4SLinus Torvalds 					int __user *option);
12774b9d07a4SUrsula Braun 	void			(*keepalive)(struct sock *sk, int valbool);
1278af01d537SAlexey Dobriyan #ifdef CONFIG_COMPAT
1279709b46e8SEric W. Biederman 	int			(*compat_ioctl)(struct sock *sk,
1280709b46e8SEric W. Biederman 					unsigned int cmd, unsigned long arg);
1281af01d537SAlexey Dobriyan #endif
12821b784140SYing Xue 	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
12831b784140SYing Xue 					   size_t len);
12841b784140SYing Xue 	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
1285ec095263SOliver Hartkopp 					   size_t len, int flags, int *addr_len);
12862bfc6685SDavid Howells 	void			(*splice_eof)(struct socket *sock);
12871da177e4SLinus Torvalds 	int			(*bind)(struct sock *sk,
1288c0425a42SChristoph Hellwig 					struct sockaddr *addr, int addr_len);
1289c0425a42SChristoph Hellwig 	int			(*bind_add)(struct sock *sk,
1290c0425a42SChristoph Hellwig 					struct sockaddr *addr, int addr_len);
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 	int			(*backlog_rcv) (struct sock *sk,
12931da177e4SLinus Torvalds 						struct sk_buff *skb);
12949cacf81fSStanislav Fomichev 	bool			(*bpf_bypass_getsockopt)(int level,
12959cacf81fSStanislav Fomichev 							 int optname);
12961da177e4SLinus Torvalds 
129746d3ceabSEric Dumazet 	void		(*release_cb)(struct sock *sk);
129846d3ceabSEric Dumazet 
12991da177e4SLinus Torvalds 	/* Keeping track of sk's, looking them up, and port selection methods. */
1300086c653fSCraig Gallek 	int			(*hash)(struct sock *sk);
13011da177e4SLinus Torvalds 	void			(*unhash)(struct sock *sk);
1302719f8358SEric Dumazet 	void			(*rehash)(struct sock *sk);
13031da177e4SLinus Torvalds 	int			(*get_port)(struct sock *sk, unsigned short snum);
130491a760b2SMenglong Dong 	void			(*put_port)(struct sock *sk);
13058a59f9d1SCong Wang #ifdef CONFIG_BPF_SYSCALL
130651e0158aSCong Wang 	int			(*psock_update_sk_prot)(struct sock *sk,
130751e0158aSCong Wang 							struct sk_psock *psock,
130851e0158aSCong Wang 							bool restore);
13098a59f9d1SCong Wang #endif
13101da177e4SLinus Torvalds 
1311286ab3d4SEric Dumazet 	/* Keeping track of sockets in use */
131265f76517SEric Dumazet #ifdef CONFIG_PROC_FS
131313ff3d6fSPavel Emelyanov 	unsigned int		inuse_idx;
131465f76517SEric Dumazet #endif
1315ebb53d75SArnaldo Carvalho de Melo 
13166c302e79SEric Dumazet #if IS_ENABLED(CONFIG_MPTCP)
1317292e6077SPaolo Abeni 	int			(*forward_alloc_get)(const struct sock *sk);
13186c302e79SEric Dumazet #endif
1319292e6077SPaolo Abeni 
1320a74f0fa0SEric Dumazet 	bool			(*stream_memory_free)(const struct sock *sk, int wake);
13217b50ecfcSCong Wang 	bool			(*sock_is_readable)(struct sock *sk);
13221da177e4SLinus Torvalds 	/* Memory pressure */
13235c52ba17SPavel Emelyanov 	void			(*enter_memory_pressure)(struct sock *sk);
132406044751SEric Dumazet 	void			(*leave_memory_pressure)(struct sock *sk);
13258d987e5cSEric Dumazet 	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
13260defbb0aSEric Dumazet 	int  __percpu		*per_cpu_fw_alloc;
13271748376bSEric Dumazet 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
1328292e6077SPaolo Abeni 
13291da177e4SLinus Torvalds 	/*
13301da177e4SLinus Torvalds 	 * Pressure flag: try to collapse.
13311da177e4SLinus Torvalds 	 * Technical note: it is used by multiple contexts non atomically.
133276f33296SEric Dumazet 	 * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
13333ab224beSHideo Aoki 	 * All the __sk_mem_schedule() is of this nature: accounting
13341da177e4SLinus Torvalds 	 * is strict, actions are advisory and have some latency.
13351da177e4SLinus Torvalds 	 */
133606044751SEric Dumazet 	unsigned long		*memory_pressure;
13378d987e5cSEric Dumazet 	long			*sysctl_mem;
1338a3dcaf17SEric Dumazet 
13391da177e4SLinus Torvalds 	int			*sysctl_wmem;
13401da177e4SLinus Torvalds 	int			*sysctl_rmem;
1341a3dcaf17SEric Dumazet 	u32			sysctl_wmem_offset;
1342a3dcaf17SEric Dumazet 	u32			sysctl_rmem_offset;
1343a3dcaf17SEric Dumazet 
13441da177e4SLinus Torvalds 	int			max_header;
13457ba42910SChangli Gao 	bool			no_autobind;
13461da177e4SLinus Torvalds 
1347e18b890bSChristoph Lameter 	struct kmem_cache	*slab;
13481da177e4SLinus Torvalds 	unsigned int		obj_size;
1349f5f80e32SEric Dumazet 	unsigned int		ipv6_pinfo_offset;
1350d50112edSAlexey Dobriyan 	slab_flags_t		slab_flags;
13517bbdb81eSAlexey Dobriyan 	unsigned int		useroffset;	/* Usercopy region offset */
13527bbdb81eSAlexey Dobriyan 	unsigned int		usersize;	/* Usercopy region size */
13531da177e4SLinus Torvalds 
135419757cebSEric Dumazet 	unsigned int __percpu	*orphan_count;
13558feaf0c0SArnaldo Carvalho de Melo 
135660236fddSArnaldo Carvalho de Melo 	struct request_sock_ops	*rsk_prot;
13576d6ee43eSArnaldo Carvalho de Melo 	struct timewait_sock_ops *twsk_prot;
13582e6599cbSArnaldo Carvalho de Melo 
135939d8cda7SPavel Emelyanov 	union {
1360ab1e0a13SArnaldo Carvalho de Melo 		struct inet_hashinfo	*hashinfo;
1361645ca708SEric Dumazet 		struct udp_table	*udp_table;
1362fc8717baSPavel Emelyanov 		struct raw_hashinfo	*raw_hash;
1363f16a7dd5SUrsula Braun 		struct smc_hashinfo	*smc_hash;
136439d8cda7SPavel Emelyanov 	} h;
1365ab1e0a13SArnaldo Carvalho de Melo 
13661da177e4SLinus Torvalds 	struct module		*owner;
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds 	char			name[32];
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds 	struct list_head	node;
137164be0aedSLorenzo Colitti 	int			(*diag_destroy)(struct sock *sk, int err);
13723859a271SKees Cook } __randomize_layout;
1373e1aab161SGlauber Costa 
137469336bd2SJoe Perches int proto_register(struct proto *prot, int alloc_slab);
137569336bd2SJoe Perches void proto_unregister(struct proto *prot);
1376bf2ae2e4SXin Long int sock_load_diag_module(int family, int protocol);
13771da177e4SLinus Torvalds 
13781c5f2cedSEric Dumazet INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
13791c5f2cedSEric Dumazet 
sk_forward_alloc_get(const struct sock * sk)1380292e6077SPaolo Abeni static inline int sk_forward_alloc_get(const struct sock *sk)
1381292e6077SPaolo Abeni {
13826c302e79SEric Dumazet #if IS_ENABLED(CONFIG_MPTCP)
13836c302e79SEric Dumazet 	if (sk->sk_prot->forward_alloc_get)
1384292e6077SPaolo Abeni 		return sk->sk_prot->forward_alloc_get(sk);
13856c302e79SEric Dumazet #endif
13865e6300e7SEric Dumazet 	return READ_ONCE(sk->sk_forward_alloc);
1387292e6077SPaolo Abeni }
1388292e6077SPaolo Abeni 
__sk_stream_memory_free(const struct sock * sk,int wake)1389a74f0fa0SEric Dumazet static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1390c9bee3b7SEric Dumazet {
1391ab4e846aSEric Dumazet 	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1392c9bee3b7SEric Dumazet 		return false;
1393c9bee3b7SEric Dumazet 
1394c9bee3b7SEric Dumazet 	return sk->sk_prot->stream_memory_free ?
1395a406290aSEric Dumazet 		INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
1396a406290aSEric Dumazet 				     tcp_stream_memory_free, sk, wake) : true;
1397a74f0fa0SEric Dumazet }
1398a74f0fa0SEric Dumazet 
sk_stream_memory_free(const struct sock * sk)1399a74f0fa0SEric Dumazet static inline bool sk_stream_memory_free(const struct sock *sk)
1400a74f0fa0SEric Dumazet {
1401a74f0fa0SEric Dumazet 	return __sk_stream_memory_free(sk, 0);
1402a74f0fa0SEric Dumazet }
1403a74f0fa0SEric Dumazet 
__sk_stream_is_writeable(const struct sock * sk,int wake)1404a74f0fa0SEric Dumazet static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1405a74f0fa0SEric Dumazet {
1406a74f0fa0SEric Dumazet 	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1407a74f0fa0SEric Dumazet 	       __sk_stream_memory_free(sk, wake);
1408c9bee3b7SEric Dumazet }
1409c9bee3b7SEric Dumazet 
sk_stream_is_writeable(const struct sock * sk)141064dc6130SEric Dumazet static inline bool sk_stream_is_writeable(const struct sock *sk)
141164dc6130SEric Dumazet {
1412a74f0fa0SEric Dumazet 	return __sk_stream_is_writeable(sk, 0);
141364dc6130SEric Dumazet }
1414e1aab161SGlauber Costa 
sk_under_cgroup_hierarchy(struct sock * sk,struct cgroup * ancestor)141554fd9c2dSDaniel Borkmann static inline int sk_under_cgroup_hierarchy(struct sock *sk,
141654fd9c2dSDaniel Borkmann 					    struct cgroup *ancestor)
141754fd9c2dSDaniel Borkmann {
141854fd9c2dSDaniel Borkmann #ifdef CONFIG_SOCK_CGROUP_DATA
141954fd9c2dSDaniel Borkmann 	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
142054fd9c2dSDaniel Borkmann 				    ancestor);
142154fd9c2dSDaniel Borkmann #else
142254fd9c2dSDaniel Borkmann 	return -ENOTSUPP;
142354fd9c2dSDaniel Borkmann #endif
142454fd9c2dSDaniel Borkmann }
1425c9bee3b7SEric Dumazet 
sk_has_memory_pressure(const struct sock * sk)1426180d8cd9SGlauber Costa static inline bool sk_has_memory_pressure(const struct sock *sk)
1427180d8cd9SGlauber Costa {
1428180d8cd9SGlauber Costa 	return sk->sk_prot->memory_pressure != NULL;
1429180d8cd9SGlauber Costa }
1430180d8cd9SGlauber Costa 
sk_under_global_memory_pressure(const struct sock * sk)14312d0c88e8SAbel Wu static inline bool sk_under_global_memory_pressure(const struct sock *sk)
14322d0c88e8SAbel Wu {
14332d0c88e8SAbel Wu 	return sk->sk_prot->memory_pressure &&
143476f33296SEric Dumazet 		!!READ_ONCE(*sk->sk_prot->memory_pressure);
14352d0c88e8SAbel Wu }
14362d0c88e8SAbel Wu 
sk_under_memory_pressure(const struct sock * sk)1437180d8cd9SGlauber Costa static inline bool sk_under_memory_pressure(const struct sock *sk)
1438180d8cd9SGlauber Costa {
1439180d8cd9SGlauber Costa 	if (!sk->sk_prot->memory_pressure)
1440180d8cd9SGlauber Costa 		return false;
1441e1aab161SGlauber Costa 
1442baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1443baac50bbSJohannes Weiner 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
1444e805605cSJohannes Weiner 		return true;
1445e1aab161SGlauber Costa 
144676f33296SEric Dumazet 	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
1447180d8cd9SGlauber Costa }
1448180d8cd9SGlauber Costa 
1449180d8cd9SGlauber Costa static inline long
proto_memory_allocated(const struct proto * prot)14503cd3399dSEric Dumazet proto_memory_allocated(const struct proto *prot)
14513cd3399dSEric Dumazet {
14523cd3399dSEric Dumazet 	return max(0L, atomic_long_read(prot->memory_allocated));
14533cd3399dSEric Dumazet }
14543cd3399dSEric Dumazet 
14553cd3399dSEric Dumazet static inline long
sk_memory_allocated(const struct sock * sk)1456180d8cd9SGlauber Costa sk_memory_allocated(const struct sock *sk)
1457180d8cd9SGlauber Costa {
14583cd3399dSEric Dumazet 	return proto_memory_allocated(sk->sk_prot);
1459180d8cd9SGlauber Costa }
1460180d8cd9SGlauber Costa 
14613cd3399dSEric Dumazet /* 1 MB per cpu, in page units */
14623cd3399dSEric Dumazet #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
1463fe1e8381SAdam Li extern int sysctl_mem_pcpu_rsv;
14643cd3399dSEric Dumazet 
proto_memory_pcpu_drain(struct proto * proto)14655e53816dSEric Dumazet static inline void proto_memory_pcpu_drain(struct proto *proto)
1466180d8cd9SGlauber Costa {
14675e53816dSEric Dumazet 	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
14683cd3399dSEric Dumazet 
14695e53816dSEric Dumazet 	if (val)
14705e53816dSEric Dumazet 		atomic_long_add(val, proto->memory_allocated);
1471180d8cd9SGlauber Costa }
1472180d8cd9SGlauber Costa 
1473180d8cd9SGlauber Costa static inline void
sk_memory_allocated_add(const struct sock * sk,int val)14745e53816dSEric Dumazet sk_memory_allocated_add(const struct sock *sk, int val)
1475180d8cd9SGlauber Costa {
14765e53816dSEric Dumazet 	struct proto *proto = sk->sk_prot;
14773cd3399dSEric Dumazet 
14785e53816dSEric Dumazet 	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
14795e53816dSEric Dumazet 
14805e53816dSEric Dumazet 	if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
14815e53816dSEric Dumazet 		proto_memory_pcpu_drain(proto);
14823cd3399dSEric Dumazet }
14835e53816dSEric Dumazet 
14845e53816dSEric Dumazet static inline void
sk_memory_allocated_sub(const struct sock * sk,int val)14855e53816dSEric Dumazet sk_memory_allocated_sub(const struct sock *sk, int val)
14865e53816dSEric Dumazet {
14875e53816dSEric Dumazet 	struct proto *proto = sk->sk_prot;
14885e53816dSEric Dumazet 
14895e53816dSEric Dumazet 	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
14905e53816dSEric Dumazet 
14915e53816dSEric Dumazet 	if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
14925e53816dSEric Dumazet 		proto_memory_pcpu_drain(proto);
1493180d8cd9SGlauber Costa }
1494180d8cd9SGlauber Costa 
1495f5a5589cSWei Wang #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
1496f5a5589cSWei Wang 
sk_sockets_allocated_dec(struct sock * sk)1497180d8cd9SGlauber Costa static inline void sk_sockets_allocated_dec(struct sock *sk)
1498180d8cd9SGlauber Costa {
1499f5a5589cSWei Wang 	percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
1500f5a5589cSWei Wang 				 SK_ALLOC_PERCPU_COUNTER_BATCH);
1501180d8cd9SGlauber Costa }
1502180d8cd9SGlauber Costa 
sk_sockets_allocated_inc(struct sock * sk)1503180d8cd9SGlauber Costa static inline void sk_sockets_allocated_inc(struct sock *sk)
1504180d8cd9SGlauber Costa {
1505f5a5589cSWei Wang 	percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
1506f5a5589cSWei Wang 				 SK_ALLOC_PERCPU_COUNTER_BATCH);
1507180d8cd9SGlauber Costa }
1508180d8cd9SGlauber Costa 
15095bf325a5SEric Dumazet static inline u64
sk_sockets_allocated_read_positive(struct sock * sk)1510180d8cd9SGlauber Costa sk_sockets_allocated_read_positive(struct sock *sk)
1511180d8cd9SGlauber Costa {
1512af95d7dfSJohannes Weiner 	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1513180d8cd9SGlauber Costa }
1514180d8cd9SGlauber Costa 
1515180d8cd9SGlauber Costa static inline int
proto_sockets_allocated_sum_positive(struct proto * prot)1516180d8cd9SGlauber Costa proto_sockets_allocated_sum_positive(struct proto *prot)
1517180d8cd9SGlauber Costa {
1518180d8cd9SGlauber Costa 	return percpu_counter_sum_positive(prot->sockets_allocated);
1519180d8cd9SGlauber Costa }
1520180d8cd9SGlauber Costa 
1521180d8cd9SGlauber Costa static inline bool
proto_memory_pressure(struct proto * prot)1522180d8cd9SGlauber Costa proto_memory_pressure(struct proto *prot)
1523180d8cd9SGlauber Costa {
1524180d8cd9SGlauber Costa 	if (!prot->memory_pressure)
1525180d8cd9SGlauber Costa 		return false;
152676f33296SEric Dumazet 	return !!READ_ONCE(*prot->memory_pressure);
1527180d8cd9SGlauber Costa }
1528180d8cd9SGlauber Costa 
152965f76517SEric Dumazet 
153065f76517SEric Dumazet #ifdef CONFIG_PROC_FS
15312a12ae5dSEric Dumazet #define PROTO_INUSE_NR	64	/* should be enough for the first time */
15322a12ae5dSEric Dumazet struct prot_inuse {
15334199bae1SEric Dumazet 	int all;
15342a12ae5dSEric Dumazet 	int val[PROTO_INUSE_NR];
15352a12ae5dSEric Dumazet };
1536b3cb764aSEric Dumazet 
sock_prot_inuse_add(const struct net * net,const struct proto * prot,int val)15372a12ae5dSEric Dumazet static inline void sock_prot_inuse_add(const struct net *net,
15382a12ae5dSEric Dumazet 				       const struct proto *prot, int val)
15392a12ae5dSEric Dumazet {
1540b3cb764aSEric Dumazet 	this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
15412a12ae5dSEric Dumazet }
1542d477eb90SEric Dumazet 
sock_inuse_add(const struct net * net,int val)1543d477eb90SEric Dumazet static inline void sock_inuse_add(const struct net *net, int val)
1544d477eb90SEric Dumazet {
15454199bae1SEric Dumazet 	this_cpu_add(net->core.prot_inuse->all, val);
1546d477eb90SEric Dumazet }
1547d477eb90SEric Dumazet 
154869336bd2SJoe Perches int sock_prot_inuse_get(struct net *net, struct proto *proto);
1549648845abSTonghao Zhang int sock_inuse_get(struct net *net);
155065f76517SEric Dumazet #else
sock_prot_inuse_add(const struct net * net,const struct proto * prot,int val)15512a12ae5dSEric Dumazet static inline void sock_prot_inuse_add(const struct net *net,
15522a12ae5dSEric Dumazet 				       const struct proto *prot, int val)
155365f76517SEric Dumazet {
155465f76517SEric Dumazet }
1555d477eb90SEric Dumazet 
sock_inuse_add(const struct net * net,int val)1556d477eb90SEric Dumazet static inline void sock_inuse_add(const struct net *net, int val)
1557d477eb90SEric Dumazet {
1558d477eb90SEric Dumazet }
155965f76517SEric Dumazet #endif
156065f76517SEric Dumazet 
15611da177e4SLinus Torvalds 
1562614c6cb4SArnaldo Carvalho de Melo /* With per-bucket locks this operation is not-atomic, so that
1563614c6cb4SArnaldo Carvalho de Melo  * this version is not worse.
1564614c6cb4SArnaldo Carvalho de Melo  */
__sk_prot_rehash(struct sock * sk)1565086c653fSCraig Gallek static inline int __sk_prot_rehash(struct sock *sk)
1566614c6cb4SArnaldo Carvalho de Melo {
1567614c6cb4SArnaldo Carvalho de Melo 	sk->sk_prot->unhash(sk);
1568086c653fSCraig Gallek 	return sk->sk_prot->hash(sk);
1569614c6cb4SArnaldo Carvalho de Melo }
1570614c6cb4SArnaldo Carvalho de Melo 
15711da177e4SLinus Torvalds /* About 10 seconds */
15721da177e4SLinus Torvalds #define SOCK_DESTROY_TIME (10*HZ)
15731da177e4SLinus Torvalds 
15741da177e4SLinus Torvalds /* Sockets 0-1023 can't be bound to unless you are superuser */
15751da177e4SLinus Torvalds #define PROT_SOCK	1024
15761da177e4SLinus Torvalds 
15771da177e4SLinus Torvalds #define SHUTDOWN_MASK	3
15781da177e4SLinus Torvalds #define RCV_SHUTDOWN	1
15791da177e4SLinus Torvalds #define SEND_SHUTDOWN	2
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds #define SOCK_BINDADDR_LOCK	4
15821da177e4SLinus Torvalds #define SOCK_BINDPORT_LOCK	8
15831da177e4SLinus Torvalds 
15841da177e4SLinus Torvalds struct socket_alloc {
15851da177e4SLinus Torvalds 	struct socket socket;
15861da177e4SLinus Torvalds 	struct inode vfs_inode;
15871da177e4SLinus Torvalds };
15881da177e4SLinus Torvalds 
SOCKET_I(struct inode * inode)15891da177e4SLinus Torvalds static inline struct socket *SOCKET_I(struct inode *inode)
15901da177e4SLinus Torvalds {
15911da177e4SLinus Torvalds 	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
15921da177e4SLinus Torvalds }
15931da177e4SLinus Torvalds 
SOCK_INODE(struct socket * socket)15941da177e4SLinus Torvalds static inline struct inode *SOCK_INODE(struct socket *socket)
15951da177e4SLinus Torvalds {
15961da177e4SLinus Torvalds 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
15971da177e4SLinus Torvalds }
15981da177e4SLinus Torvalds 
15993ab224beSHideo Aoki /*
16003ab224beSHideo Aoki  * Functions for memory accounting
16013ab224beSHideo Aoki  */
1602f8c3bf00SPaolo Abeni int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
160369336bd2SJoe Perches int __sk_mem_schedule(struct sock *sk, int size, int kind);
1604f8c3bf00SPaolo Abeni void __sk_mem_reduce_allocated(struct sock *sk, int amount);
16051a24e04eSEric Dumazet void __sk_mem_reclaim(struct sock *sk, int amount);
16061da177e4SLinus Torvalds 
16073ab224beSHideo Aoki #define SK_MEM_SEND	0
16083ab224beSHideo Aoki #define SK_MEM_RECV	1
16091da177e4SLinus Torvalds 
1610e70f3c70SEric Dumazet /* sysctl_mem values are in pages */
sk_prot_mem_limits(const struct sock * sk,int index)1611bd68a2a8SEric Dumazet static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1612bd68a2a8SEric Dumazet {
1613816cd168SJakub Kicinski 	return READ_ONCE(sk->sk_prot->sysctl_mem[index]);
1614bd68a2a8SEric Dumazet }
1615bd68a2a8SEric Dumazet 
sk_mem_pages(int amt)16163ab224beSHideo Aoki static inline int sk_mem_pages(int amt)
16171da177e4SLinus Torvalds {
1618100fdd1fSEric Dumazet 	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
16191da177e4SLinus Torvalds }
16201da177e4SLinus Torvalds 
sk_has_account(struct sock * sk)1621dc6b9b78SEric Dumazet static inline bool sk_has_account(struct sock *sk)
16221da177e4SLinus Torvalds {
16233ab224beSHideo Aoki 	/* return true if protocol supports memory accounting */
16243ab224beSHideo Aoki 	return !!sk->sk_prot->memory_allocated;
16251da177e4SLinus Torvalds }
16261da177e4SLinus Torvalds 
sk_wmem_schedule(struct sock * sk,int size)1627dc6b9b78SEric Dumazet static inline bool sk_wmem_schedule(struct sock *sk, int size)
16281da177e4SLinus Torvalds {
16297c80b038SEric Dumazet 	int delta;
16307c80b038SEric Dumazet 
16313ab224beSHideo Aoki 	if (!sk_has_account(sk))
1632dc6b9b78SEric Dumazet 		return true;
16337c80b038SEric Dumazet 	delta = size - sk->sk_forward_alloc;
16347c80b038SEric Dumazet 	return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
16353ab224beSHideo Aoki }
16363ab224beSHideo Aoki 
1637c76562b6SMel Gorman static inline bool
__sk_rmem_schedule(struct sock * sk,int size,bool pfmemalloc)1638*9dbc7e0eSCong Wang __sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
16393ab224beSHideo Aoki {
16407c80b038SEric Dumazet 	int delta;
16417c80b038SEric Dumazet 
16423ab224beSHideo Aoki 	if (!sk_has_account(sk))
1643dc6b9b78SEric Dumazet 		return true;
16447c80b038SEric Dumazet 	delta = size - sk->sk_forward_alloc;
16457c80b038SEric Dumazet 	return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
1646*9dbc7e0eSCong Wang 	       pfmemalloc;
1647*9dbc7e0eSCong Wang }
1648*9dbc7e0eSCong Wang 
1649*9dbc7e0eSCong Wang static inline bool
sk_rmem_schedule(struct sock * sk,struct sk_buff * skb,int size)1650*9dbc7e0eSCong Wang sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1651*9dbc7e0eSCong Wang {
1652*9dbc7e0eSCong Wang 	return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
16533ab224beSHideo Aoki }
16543ab224beSHideo Aoki 
sk_unused_reserved_mem(const struct sock * sk)16552bb2f5fbSWei Wang static inline int sk_unused_reserved_mem(const struct sock *sk)
16562bb2f5fbSWei Wang {
16572bb2f5fbSWei Wang 	int unused_mem;
16582bb2f5fbSWei Wang 
16592bb2f5fbSWei Wang 	if (likely(!sk->sk_reserved_mem))
16602bb2f5fbSWei Wang 		return 0;
16612bb2f5fbSWei Wang 
16622bb2f5fbSWei Wang 	unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
16632bb2f5fbSWei Wang 			atomic_read(&sk->sk_rmem_alloc);
16642bb2f5fbSWei Wang 
16652bb2f5fbSWei Wang 	return unused_mem > 0 ? unused_mem : 0;
16662bb2f5fbSWei Wang }
16672bb2f5fbSWei Wang 
sk_mem_reclaim(struct sock * sk)16683ab224beSHideo Aoki static inline void sk_mem_reclaim(struct sock *sk)
16693ab224beSHideo Aoki {
16702bb2f5fbSWei Wang 	int reclaimable;
16712bb2f5fbSWei Wang 
16723ab224beSHideo Aoki 	if (!sk_has_account(sk))
16733ab224beSHideo Aoki 		return;
16742bb2f5fbSWei Wang 
16752bb2f5fbSWei Wang 	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
16762bb2f5fbSWei Wang 
1677100fdd1fSEric Dumazet 	if (reclaimable >= (int)PAGE_SIZE)
16782bb2f5fbSWei Wang 		__sk_mem_reclaim(sk, reclaimable);
16792bb2f5fbSWei Wang }
16802bb2f5fbSWei Wang 
sk_mem_reclaim_final(struct sock * sk)16812bb2f5fbSWei Wang static inline void sk_mem_reclaim_final(struct sock *sk)
16822bb2f5fbSWei Wang {
16832bb2f5fbSWei Wang 	sk->sk_reserved_mem = 0;
16842bb2f5fbSWei Wang 	sk_mem_reclaim(sk);
16853ab224beSHideo Aoki }
16863ab224beSHideo Aoki 
sk_mem_charge(struct sock * sk,int size)16873ab224beSHideo Aoki static inline void sk_mem_charge(struct sock *sk, int size)
16883ab224beSHideo Aoki {
16893ab224beSHideo Aoki 	if (!sk_has_account(sk))
16903ab224beSHideo Aoki 		return;
16915e6300e7SEric Dumazet 	sk_forward_alloc_add(sk, -size);
16923ab224beSHideo Aoki }
16933ab224beSHideo Aoki 
sk_mem_uncharge(struct sock * sk,int size)16943ab224beSHideo Aoki static inline void sk_mem_uncharge(struct sock *sk, int size)
16953ab224beSHideo Aoki {
16963ab224beSHideo Aoki 	if (!sk_has_account(sk))
16973ab224beSHideo Aoki 		return;
16985e6300e7SEric Dumazet 	sk_forward_alloc_add(sk, size);
16994890b686SEric Dumazet 	sk_mem_reclaim(sk);
17003ab224beSHideo Aoki }
17013ab224beSHideo Aoki 
1702ed07536eSPeter Zijlstra /*
1703ed07536eSPeter Zijlstra  * Macro so as to not evaluate some arguments when
1704ed07536eSPeter Zijlstra  * lockdep is not enabled.
1705ed07536eSPeter Zijlstra  *
1706ed07536eSPeter Zijlstra  * Mark both the sk_lock and the sk_lock.slock as a
1707ed07536eSPeter Zijlstra  * per-address-family lock class.
1708ed07536eSPeter Zijlstra  */
1709ed07536eSPeter Zijlstra #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
1710ed07536eSPeter Zijlstra do {									\
1711d2e9117cSJohn Heffner 	sk->sk_lock.owned = 0;						\
1712ed07536eSPeter Zijlstra 	init_waitqueue_head(&sk->sk_lock.wq);				\
1713ed07536eSPeter Zijlstra 	spin_lock_init(&(sk)->sk_lock.slock);				\
1714ed07536eSPeter Zijlstra 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
1715ed07536eSPeter Zijlstra 			sizeof((sk)->sk_lock));				\
1716ed07536eSPeter Zijlstra 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
1717ed07536eSPeter Zijlstra 				(skey), (sname));				\
1718ed07536eSPeter Zijlstra 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
1719ed07536eSPeter Zijlstra } while (0)
1720ed07536eSPeter Zijlstra 
lockdep_sock_is_held(const struct sock * sk)172105b93801SMatthew Wilcox static inline bool lockdep_sock_is_held(const struct sock *sk)
17221e1d04e6SHannes Frederic Sowa {
17231e1d04e6SHannes Frederic Sowa 	return lockdep_is_held(&sk->sk_lock) ||
17241e1d04e6SHannes Frederic Sowa 	       lockdep_is_held(&sk->sk_lock.slock);
17251e1d04e6SHannes Frederic Sowa }
17261e1d04e6SHannes Frederic Sowa 
172769336bd2SJoe Perches void lock_sock_nested(struct sock *sk, int subclass);
1728fcc70d5fSPeter Zijlstra 
lock_sock(struct sock * sk)1729fcc70d5fSPeter Zijlstra static inline void lock_sock(struct sock *sk)
1730fcc70d5fSPeter Zijlstra {
1731fcc70d5fSPeter Zijlstra 	lock_sock_nested(sk, 0);
1732fcc70d5fSPeter Zijlstra }
1733fcc70d5fSPeter Zijlstra 
1734ad80b0fcSPaolo Abeni void __lock_sock(struct sock *sk);
17358873c064SEric Dumazet void __release_sock(struct sock *sk);
173669336bd2SJoe Perches void release_sock(struct sock *sk);
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds /* BH context may only use the following locking interface. */
17391da177e4SLinus Torvalds #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
1740c6366184SIngo Molnar #define bh_lock_sock_nested(__sk) \
1741c6366184SIngo Molnar 				spin_lock_nested(&((__sk)->sk_lock.slock), \
1742c6366184SIngo Molnar 				SINGLE_DEPTH_NESTING)
17431da177e4SLinus Torvalds #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
17441da177e4SLinus Torvalds 
174549054556SPaolo Abeni bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
174649054556SPaolo Abeni 
174749054556SPaolo Abeni /**
174849054556SPaolo Abeni  * lock_sock_fast - fast version of lock_sock
174949054556SPaolo Abeni  * @sk: socket
175049054556SPaolo Abeni  *
175149054556SPaolo Abeni  * This version should be used for very small section, where process wont block
175249054556SPaolo Abeni  * return false if fast path is taken:
175349054556SPaolo Abeni  *
175449054556SPaolo Abeni  *   sk_lock.slock locked, owned = 0, BH disabled
175549054556SPaolo Abeni  *
175649054556SPaolo Abeni  * return true if slow path is taken:
175749054556SPaolo Abeni  *
175849054556SPaolo Abeni  *   sk_lock.slock unlocked, owned = 1, BH enabled
175949054556SPaolo Abeni  */
lock_sock_fast(struct sock * sk)176049054556SPaolo Abeni static inline bool lock_sock_fast(struct sock *sk)
176149054556SPaolo Abeni {
176249054556SPaolo Abeni 	/* The sk_lock has mutex_lock() semantics here. */
176349054556SPaolo Abeni 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
176449054556SPaolo Abeni 
176549054556SPaolo Abeni 	return __lock_sock_fast(sk);
176649054556SPaolo Abeni }
176749054556SPaolo Abeni 
176849054556SPaolo Abeni /* fast socket lock variant for caller already holding a [different] socket lock */
lock_sock_fast_nested(struct sock * sk)176949054556SPaolo Abeni static inline bool lock_sock_fast_nested(struct sock *sk)
177049054556SPaolo Abeni {
177149054556SPaolo Abeni 	mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
177249054556SPaolo Abeni 
177349054556SPaolo Abeni 	return __lock_sock_fast(sk);
177449054556SPaolo Abeni }
177512f4bd86SPaolo Abeni 
17768a74ad60SEric Dumazet /**
17778a74ad60SEric Dumazet  * unlock_sock_fast - complement of lock_sock_fast
17788a74ad60SEric Dumazet  * @sk: socket
17798a74ad60SEric Dumazet  * @slow: slow mode
17808a74ad60SEric Dumazet  *
17818a74ad60SEric Dumazet  * fast unlock socket for user context.
17828a74ad60SEric Dumazet  * If slow mode is on, we call regular release_sock()
17838a74ad60SEric Dumazet  */
unlock_sock_fast(struct sock * sk,bool slow)17848a74ad60SEric Dumazet static inline void unlock_sock_fast(struct sock *sk, bool slow)
178512f4bd86SPaolo Abeni 	__releases(&sk->sk_lock.slock)
17864b0b72f7SEric Dumazet {
178712f4bd86SPaolo Abeni 	if (slow) {
17888a74ad60SEric Dumazet 		release_sock(sk);
178912f4bd86SPaolo Abeni 		__release(&sk->sk_lock.slock);
179012f4bd86SPaolo Abeni 	} else {
17912dcb96baSThomas Gleixner 		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
17924b0b72f7SEric Dumazet 		spin_unlock_bh(&sk->sk_lock.slock);
17934b0b72f7SEric Dumazet 	}
179412f4bd86SPaolo Abeni }
17954b0b72f7SEric Dumazet 
179624426654SMartin KaFai Lau void sockopt_lock_sock(struct sock *sk);
179724426654SMartin KaFai Lau void sockopt_release_sock(struct sock *sk);
1798e42c7beeSMartin KaFai Lau bool sockopt_ns_capable(struct user_namespace *ns, int cap);
1799e42c7beeSMartin KaFai Lau bool sockopt_capable(int cap);
180024426654SMartin KaFai Lau 
1801fafc4e1eSHannes Frederic Sowa /* Used by processes to "lock" a socket state, so that
1802fafc4e1eSHannes Frederic Sowa  * interrupts and bottom half handlers won't change it
1803fafc4e1eSHannes Frederic Sowa  * from under us. It essentially blocks any incoming
1804fafc4e1eSHannes Frederic Sowa  * packets, so that we won't get any new data or any
1805fafc4e1eSHannes Frederic Sowa  * packets that change the state of the socket.
1806fafc4e1eSHannes Frederic Sowa  *
1807fafc4e1eSHannes Frederic Sowa  * While locked, BH processing will add new packets to
1808fafc4e1eSHannes Frederic Sowa  * the backlog queue.  This queue is processed by the
1809fafc4e1eSHannes Frederic Sowa  * owner of the socket lock right before it is released.
1810fafc4e1eSHannes Frederic Sowa  *
1811fafc4e1eSHannes Frederic Sowa  * Since ~2.3.5 it is also exclusive sleep lock serializing
1812fafc4e1eSHannes Frederic Sowa  * accesses from user process context.
1813fafc4e1eSHannes Frederic Sowa  */
1814fafc4e1eSHannes Frederic Sowa 
sock_owned_by_me(const struct sock * sk)181546cc6e49SEric Dumazet static inline void sock_owned_by_me(const struct sock *sk)
1816fafc4e1eSHannes Frederic Sowa {
1817fafc4e1eSHannes Frederic Sowa #ifdef CONFIG_LOCKDEP
18185e91f6ceSEric Dumazet 	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1819fafc4e1eSHannes Frederic Sowa #endif
182046cc6e49SEric Dumazet }
182146cc6e49SEric Dumazet 
sock_not_owned_by_me(const struct sock * sk)1822c1ae4d1eSEric Dumazet static inline void sock_not_owned_by_me(const struct sock *sk)
1823c1ae4d1eSEric Dumazet {
1824c1ae4d1eSEric Dumazet #ifdef CONFIG_LOCKDEP
1825c1ae4d1eSEric Dumazet 	WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
1826c1ae4d1eSEric Dumazet #endif
1827c1ae4d1eSEric Dumazet }
1828c1ae4d1eSEric Dumazet 
sock_owned_by_user(const struct sock * sk)182946cc6e49SEric Dumazet static inline bool sock_owned_by_user(const struct sock *sk)
183046cc6e49SEric Dumazet {
183146cc6e49SEric Dumazet 	sock_owned_by_me(sk);
1832fafc4e1eSHannes Frederic Sowa 	return sk->sk_lock.owned;
1833fafc4e1eSHannes Frederic Sowa }
1834fafc4e1eSHannes Frederic Sowa 
sock_owned_by_user_nocheck(const struct sock * sk)1835602f7a27STom Herbert static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1836602f7a27STom Herbert {
1837602f7a27STom Herbert 	return sk->sk_lock.owned;
1838602f7a27STom Herbert }
1839602f7a27STom Herbert 
sock_release_ownership(struct sock * sk)184033d60fbdSKuniyuki Iwashima static inline void sock_release_ownership(struct sock *sk)
184133d60fbdSKuniyuki Iwashima {
184233d60fbdSKuniyuki Iwashima 	if (sock_owned_by_user_nocheck(sk)) {
184333d60fbdSKuniyuki Iwashima 		sk->sk_lock.owned = 0;
184433d60fbdSKuniyuki Iwashima 
184533d60fbdSKuniyuki Iwashima 		/* The sk_lock has mutex_unlock() semantics: */
184633d60fbdSKuniyuki Iwashima 		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
184733d60fbdSKuniyuki Iwashima 	}
184833d60fbdSKuniyuki Iwashima }
184933d60fbdSKuniyuki Iwashima 
1850fafc4e1eSHannes Frederic Sowa /* no reclassification while locks are held */
sock_allow_reclassification(const struct sock * csk)1851fafc4e1eSHannes Frederic Sowa static inline bool sock_allow_reclassification(const struct sock *csk)
1852fafc4e1eSHannes Frederic Sowa {
1853fafc4e1eSHannes Frederic Sowa 	struct sock *sk = (struct sock *)csk;
1854fafc4e1eSHannes Frederic Sowa 
185533d60fbdSKuniyuki Iwashima 	return !sock_owned_by_user_nocheck(sk) &&
185633d60fbdSKuniyuki Iwashima 		!spin_is_locked(&sk->sk_lock.slock);
1857fafc4e1eSHannes Frederic Sowa }
18588a74ad60SEric Dumazet 
185969336bd2SJoe Perches struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
186011aa9c28SEric W. Biederman 		      struct proto *prot, int kern);
186169336bd2SJoe Perches void sk_free(struct sock *sk);
1862eb4cb008SCraig Gallek void sk_destruct(struct sock *sk);
186369336bd2SJoe Perches struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
186494352d45SArnaldo Carvalho de Melo void sk_free_unlock_clone(struct sock *sk);
18651da177e4SLinus Torvalds 
186669336bd2SJoe Perches struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1867dd0fc66fSAl Viro 			     gfp_t priority);
18681d2077acSEric Dumazet void __sock_wfree(struct sk_buff *skb);
186969336bd2SJoe Perches void sock_wfree(struct sk_buff *skb);
187098ba0bd5SWillem de Bruijn struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
187198ba0bd5SWillem de Bruijn 			     gfp_t priority);
187269336bd2SJoe Perches void skb_orphan_partial(struct sk_buff *skb);
187369336bd2SJoe Perches void sock_rfree(struct sk_buff *skb);
187462bccb8cSAlexander Duyck void sock_efree(struct sk_buff *skb);
187582eabd9eSAlexander Duyck #ifdef CONFIG_INET
187669336bd2SJoe Perches void sock_edemux(struct sk_buff *skb);
1877cf7fbe66SJoe Stringer void sock_pfree(struct sk_buff *skb);
187882eabd9eSAlexander Duyck #else
1879158f323bSEric Dumazet #define sock_edemux sock_efree
188082eabd9eSAlexander Duyck #endif
18811da177e4SLinus Torvalds 
188229003875SMartin KaFai Lau int sk_setsockopt(struct sock *sk, int level, int optname,
188329003875SMartin KaFai Lau 		  sockptr_t optval, unsigned int optlen);
188469336bd2SJoe Perches int sock_setsockopt(struct socket *sock, int level, int op,
1885c8c1bbb6SChristoph Hellwig 		    sockptr_t optval, unsigned int optlen);
1886e88c16a4SBreno Leitao int do_sock_setsockopt(struct socket *sock, bool compat, int level,
1887e88c16a4SBreno Leitao 		       int optname, sockptr_t optval, int optlen);
18882174a3c3SBreno Leitao int do_sock_getsockopt(struct socket *sock, bool compat, int level,
18892174a3c3SBreno Leitao 		       int optname, sockptr_t optval, sockptr_t optlen);
18901da177e4SLinus Torvalds 
189165ddc82dSMartin KaFai Lau int sk_getsockopt(struct sock *sk, int level, int optname,
189265ddc82dSMartin KaFai Lau 		  sockptr_t optval, sockptr_t optlen);
1893c7cbdbf2SArnd Bergmann int sock_gettstamp(struct socket *sock, void __user *userstamp,
1894c7cbdbf2SArnd Bergmann 		   bool timeval, bool time32);
189569336bd2SJoe Perches struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
189669336bd2SJoe Perches 				     unsigned long data_len, int noblock,
189769336bd2SJoe Perches 				     int *errcode, int max_page_order);
1898de32bc6aSPavel Begunkov 
sock_alloc_send_skb(struct sock * sk,unsigned long size,int noblock,int * errcode)1899de32bc6aSPavel Begunkov static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
1900de32bc6aSPavel Begunkov 						  unsigned long size,
1901de32bc6aSPavel Begunkov 						  int noblock, int *errcode)
1902de32bc6aSPavel Begunkov {
1903de32bc6aSPavel Begunkov 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1904de32bc6aSPavel Begunkov }
1905de32bc6aSPavel Begunkov 
190669336bd2SJoe Perches void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
190769336bd2SJoe Perches void sock_kfree_s(struct sock *sk, void *mem, int size);
190879e88659SDaniel Borkmann void sock_kzfree_s(struct sock *sk, void *mem, int size);
190969336bd2SJoe Perches void sk_send_sigurg(struct sock *sk);
19101da177e4SLinus Torvalds 
sock_replace_proto(struct sock * sk,struct proto * proto)1911fee9ac06SPavel Begunkov static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
1912fee9ac06SPavel Begunkov {
1913fee9ac06SPavel Begunkov 	if (sk->sk_socket)
1914fee9ac06SPavel Begunkov 		clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
1915fee9ac06SPavel Begunkov 	WRITE_ONCE(sk->sk_prot, proto);
1916fee9ac06SPavel Begunkov }
1917fee9ac06SPavel Begunkov 
1918f28ea365SEdward Jee struct sockcm_cookie {
191980b14deeSRichard Cochran 	u64 transmit_time;
1920f28ea365SEdward Jee 	u32 mark;
1921b534dc46SWillem de Bruijn 	u32 tsflags;
1922f28ea365SEdward Jee };
1923f28ea365SEdward Jee 
sockcm_init(struct sockcm_cookie * sockc,const struct sock * sk)1924657a0667SWillem de Bruijn static inline void sockcm_init(struct sockcm_cookie *sockc,
1925657a0667SWillem de Bruijn 			       const struct sock *sk)
1926657a0667SWillem de Bruijn {
1927e3390b30SEric Dumazet 	*sockc = (struct sockcm_cookie) {
1928e3390b30SEric Dumazet 		.tsflags = READ_ONCE(sk->sk_tsflags)
1929e3390b30SEric Dumazet 	};
1930657a0667SWillem de Bruijn }
1931657a0667SWillem de Bruijn 
1932233baf9aSxu xin int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
193339771b12SWillem de Bruijn 		     struct sockcm_cookie *sockc);
1934f28ea365SEdward Jee int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1935f28ea365SEdward Jee 		   struct sockcm_cookie *sockc);
1936f28ea365SEdward Jee 
19371da177e4SLinus Torvalds /*
19381da177e4SLinus Torvalds  * Functions to fill in entries in struct proto_ops when a protocol
19391da177e4SLinus Torvalds  * does not implement a particular function.
19401da177e4SLinus Torvalds  */
194169336bd2SJoe Perches int sock_no_bind(struct socket *, struct sockaddr *, int);
194269336bd2SJoe Perches int sock_no_connect(struct socket *, struct sockaddr *, int, int);
194369336bd2SJoe Perches int sock_no_socketpair(struct socket *, struct socket *);
1944cdfbabfbSDavid Howells int sock_no_accept(struct socket *, struct socket *, int, bool);
19459b2c45d4SDenys Vlasenko int sock_no_getname(struct socket *, struct sockaddr *, int);
194669336bd2SJoe Perches int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
194769336bd2SJoe Perches int sock_no_listen(struct socket *, int);
194869336bd2SJoe Perches int sock_no_shutdown(struct socket *, int);
19491b784140SYing Xue int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
1950306b13ebSTom Herbert int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
19511b784140SYing Xue int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
195269336bd2SJoe Perches int sock_no_mmap(struct file *file, struct socket *sock,
19531da177e4SLinus Torvalds 		 struct vm_area_struct *vma);
19541da177e4SLinus Torvalds 
19551da177e4SLinus Torvalds /*
19561da177e4SLinus Torvalds  * Functions to fill in entries in struct proto_ops when a protocol
19571da177e4SLinus Torvalds  * uses the inet style.
19581da177e4SLinus Torvalds  */
195969336bd2SJoe Perches int sock_common_getsockopt(struct socket *sock, int level, int optname,
19601da177e4SLinus Torvalds 				  char __user *optval, int __user *optlen);
19611b784140SYing Xue int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
19621b784140SYing Xue 			int flags);
196369336bd2SJoe Perches int sock_common_setsockopt(struct socket *sock, int level, int optname,
1964a7b75c5aSChristoph Hellwig 			   sockptr_t optval, unsigned int optlen);
19651da177e4SLinus Torvalds 
196669336bd2SJoe Perches void sk_common_release(struct sock *sk);
19671da177e4SLinus Torvalds 
19681da177e4SLinus Torvalds /*
19691da177e4SLinus Torvalds  *	Default socket callbacks and setup code
19701da177e4SLinus Torvalds  */
19711da177e4SLinus Torvalds 
1972584f3742SPietro Borrello /* Initialise core socket variables using an explicit uid. */
1973584f3742SPietro Borrello void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
1974584f3742SPietro Borrello 
1975584f3742SPietro Borrello /* Initialise core socket variables.
1976584f3742SPietro Borrello  * Assumes struct socket *sock is embedded in a struct socket_alloc.
1977584f3742SPietro Borrello  */
197869336bd2SJoe Perches void sock_init_data(struct socket *sock, struct sock *sk);
19791da177e4SLinus Torvalds 
19801da177e4SLinus Torvalds /*
19811da177e4SLinus Torvalds  * Socket reference counting postulates.
19821da177e4SLinus Torvalds  *
19831da177e4SLinus Torvalds  * * Each user of socket SHOULD hold a reference count.
19841da177e4SLinus Torvalds  * * Each access point to socket (an hash table bucket, reference from a list,
19851da177e4SLinus Torvalds  *   running timer, skb in flight MUST hold a reference count.
19861da177e4SLinus Torvalds  * * When reference count hits 0, it means it will never increase back.
19871da177e4SLinus Torvalds  * * When reference count hits 0, it means that no references from
19881da177e4SLinus Torvalds  *   outside exist to this socket and current process on current CPU
19891da177e4SLinus Torvalds  *   is last user and may/should destroy this socket.
19901da177e4SLinus Torvalds  * * sk_free is called from any context: process, BH, IRQ. When
19911da177e4SLinus Torvalds  *   it is called, socket has no references from outside -> sk_free
19921da177e4SLinus Torvalds  *   may release descendant resources allocated by the socket, but
19931da177e4SLinus Torvalds  *   to the time when it is called, socket is NOT referenced by any
19941da177e4SLinus Torvalds  *   hash tables, lists etc.
19951da177e4SLinus Torvalds  * * Packets, delivered from outside (from network or from another process)
19961da177e4SLinus Torvalds  *   and enqueued on receive/error queues SHOULD NOT grab reference count,
19971da177e4SLinus Torvalds  *   when they sit in queue. Otherwise, packets will leak to hole, when
19981da177e4SLinus Torvalds  *   socket is looked up by one cpu and unhasing is made by another CPU.
19991da177e4SLinus Torvalds  *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
20001da177e4SLinus Torvalds  *   (leak to backlog). Packet socket does all the processing inside
20011da177e4SLinus Torvalds  *   BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets
20021da177e4SLinus Torvalds  *   use separate SMP lock, so that they are prone too.
20031da177e4SLinus Torvalds  */
20041da177e4SLinus Torvalds 
20051da177e4SLinus Torvalds /* Ungrab socket and destroy it, if it was the last reference. */
sock_put(struct sock * sk)20061da177e4SLinus Torvalds static inline void sock_put(struct sock *sk)
20071da177e4SLinus Torvalds {
200841c6d650SReshetova, Elena 	if (refcount_dec_and_test(&sk->sk_refcnt))
20091da177e4SLinus Torvalds 		sk_free(sk);
20101da177e4SLinus Torvalds }
201105dbc7b5SEric Dumazet /* Generic version of sock_put(), dealing with all sockets
201241b822c5SEric Dumazet  * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
201305dbc7b5SEric Dumazet  */
201405dbc7b5SEric Dumazet void sock_gen_put(struct sock *sk);
20151da177e4SLinus Torvalds 
20164f0c40d9SWillem de Bruijn int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
2017c3f24cfbSEric Dumazet 		     unsigned int trim_cap, bool refcounted);
sk_receive_skb(struct sock * sk,struct sk_buff * skb,const int nested)20184f0c40d9SWillem de Bruijn static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
20194f0c40d9SWillem de Bruijn 				 const int nested)
20204f0c40d9SWillem de Bruijn {
2021c3f24cfbSEric Dumazet 	return __sk_receive_skb(sk, skb, nested, 1, true);
20224f0c40d9SWillem de Bruijn }
202325995ff5SArnaldo Carvalho de Melo 
sk_tx_queue_set(struct sock * sk,int tx_queue)2024e022f0b4SKrishna Kumar static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
2025e022f0b4SKrishna Kumar {
2026755c31cdSAmritha Nambiar 	/* sk_tx_queue_mapping accept only upto a 16-bit value */
2027755c31cdSAmritha Nambiar 	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
2028755c31cdSAmritha Nambiar 		return;
2029224f68c5SEric Dumazet 	/* Paired with READ_ONCE() in sk_tx_queue_get() and
2030224f68c5SEric Dumazet 	 * other WRITE_ONCE() because socket lock might be not held.
2031224f68c5SEric Dumazet 	 */
2032224f68c5SEric Dumazet 	WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
2033e022f0b4SKrishna Kumar }
2034e022f0b4SKrishna Kumar 
2035755c31cdSAmritha Nambiar #define NO_QUEUE_MAPPING	USHRT_MAX
2036755c31cdSAmritha Nambiar 
sk_tx_queue_clear(struct sock * sk)2037e022f0b4SKrishna Kumar static inline void sk_tx_queue_clear(struct sock *sk)
2038e022f0b4SKrishna Kumar {
2039224f68c5SEric Dumazet 	/* Paired with READ_ONCE() in sk_tx_queue_get() and
2040224f68c5SEric Dumazet 	 * other WRITE_ONCE() because socket lock might be not held.
2041224f68c5SEric Dumazet 	 */
2042224f68c5SEric Dumazet 	WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
2043e022f0b4SKrishna Kumar }
2044e022f0b4SKrishna Kumar 
sk_tx_queue_get(const struct sock * sk)2045e022f0b4SKrishna Kumar static inline int sk_tx_queue_get(const struct sock *sk)
2046e022f0b4SKrishna Kumar {
2047224f68c5SEric Dumazet 	if (sk) {
2048224f68c5SEric Dumazet 		/* Paired with WRITE_ONCE() in sk_tx_queue_clear()
2049224f68c5SEric Dumazet 		 * and sk_tx_queue_set().
2050224f68c5SEric Dumazet 		 */
2051224f68c5SEric Dumazet 		int val = READ_ONCE(sk->sk_tx_queue_mapping);
2052755c31cdSAmritha Nambiar 
2053224f68c5SEric Dumazet 		if (val != NO_QUEUE_MAPPING)
2054224f68c5SEric Dumazet 			return val;
2055224f68c5SEric Dumazet 	}
2056755c31cdSAmritha Nambiar 	return -1;
2057e022f0b4SKrishna Kumar }
2058e022f0b4SKrishna Kumar 
__sk_rx_queue_set(struct sock * sk,const struct sk_buff * skb,bool force_set)2059a37a0ee4SEric Dumazet static inline void __sk_rx_queue_set(struct sock *sk,
2060a37a0ee4SEric Dumazet 				     const struct sk_buff *skb,
2061a37a0ee4SEric Dumazet 				     bool force_set)
2062c6345ce7SAmritha Nambiar {
20634e1beeccSTariq Toukan #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
2064c6345ce7SAmritha Nambiar 	if (skb_rx_queue_recorded(skb)) {
2065c6345ce7SAmritha Nambiar 		u16 rx_queue = skb_get_rx_queue(skb);
2066c6345ce7SAmritha Nambiar 
2067a37a0ee4SEric Dumazet 		if (force_set ||
2068a37a0ee4SEric Dumazet 		    unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
2069342159eeSEric Dumazet 			WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
2070c6345ce7SAmritha Nambiar 	}
2071c6345ce7SAmritha Nambiar #endif
2072c6345ce7SAmritha Nambiar }
2073c6345ce7SAmritha Nambiar 
sk_rx_queue_set(struct sock * sk,const struct sk_buff * skb)2074a37a0ee4SEric Dumazet static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
2075a37a0ee4SEric Dumazet {
2076a37a0ee4SEric Dumazet 	__sk_rx_queue_set(sk, skb, true);
2077a37a0ee4SEric Dumazet }
2078a37a0ee4SEric Dumazet 
sk_rx_queue_update(struct sock * sk,const struct sk_buff * skb)2079a37a0ee4SEric Dumazet static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
2080a37a0ee4SEric Dumazet {
2081a37a0ee4SEric Dumazet 	__sk_rx_queue_set(sk, skb, false);
2082a37a0ee4SEric Dumazet }
2083a37a0ee4SEric Dumazet 
sk_rx_queue_clear(struct sock * sk)2084c6345ce7SAmritha Nambiar static inline void sk_rx_queue_clear(struct sock *sk)
2085c6345ce7SAmritha Nambiar {
20864e1beeccSTariq Toukan #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
208709b89846SEric Dumazet 	WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
2088c6345ce7SAmritha Nambiar #endif
2089c6345ce7SAmritha Nambiar }
2090c6345ce7SAmritha Nambiar 
sk_rx_queue_get(const struct sock * sk)2091fc9bab24SAmritha Nambiar static inline int sk_rx_queue_get(const struct sock *sk)
2092fc9bab24SAmritha Nambiar {
20934e1beeccSTariq Toukan #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
209409b89846SEric Dumazet 	if (sk) {
209509b89846SEric Dumazet 		int res = READ_ONCE(sk->sk_rx_queue_mapping);
209609b89846SEric Dumazet 
209709b89846SEric Dumazet 		if (res != NO_QUEUE_MAPPING)
209809b89846SEric Dumazet 			return res;
209909b89846SEric Dumazet 	}
21004e1beeccSTariq Toukan #endif
2101fc9bab24SAmritha Nambiar 
2102fc9bab24SAmritha Nambiar 	return -1;
2103fc9bab24SAmritha Nambiar }
2104fc9bab24SAmritha Nambiar 
sk_set_socket(struct sock * sk,struct socket * sock)2105972692e0SDavid S. Miller static inline void sk_set_socket(struct sock *sk, struct socket *sock)
2106972692e0SDavid S. Miller {
2107972692e0SDavid S. Miller 	sk->sk_socket = sock;
2108972692e0SDavid S. Miller }
2109972692e0SDavid S. Miller 
sk_sleep(struct sock * sk)2110aa395145SEric Dumazet static inline wait_queue_head_t *sk_sleep(struct sock *sk)
2111aa395145SEric Dumazet {
2112eaefd110SEric Dumazet 	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
2113eaefd110SEric Dumazet 	return &rcu_dereference_raw(sk->sk_wq)->wait;
2114aa395145SEric Dumazet }
21151da177e4SLinus Torvalds /* Detach socket from process context.
21161da177e4SLinus Torvalds  * Announce socket dead, detach it from wait queue and inode.
21171da177e4SLinus Torvalds  * Note that parent inode held reference count on this struct sock,
21181da177e4SLinus Torvalds  * we do not release it in this function, because protocol
21191da177e4SLinus Torvalds  * probably wants some additional cleanups or even continuing
21201da177e4SLinus Torvalds  * to work with this socket (TCP).
21211da177e4SLinus Torvalds  */
sock_orphan(struct sock * sk)21221da177e4SLinus Torvalds static inline void sock_orphan(struct sock *sk)
21231da177e4SLinus Torvalds {
21241da177e4SLinus Torvalds 	write_lock_bh(&sk->sk_callback_lock);
21251da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_DEAD);
2126972692e0SDavid S. Miller 	sk_set_socket(sk, NULL);
212743815482SEric Dumazet 	sk->sk_wq  = NULL;
21281da177e4SLinus Torvalds 	write_unlock_bh(&sk->sk_callback_lock);
21291da177e4SLinus Torvalds }
21301da177e4SLinus Torvalds 
sock_graft(struct sock * sk,struct socket * parent)21311da177e4SLinus Torvalds static inline void sock_graft(struct sock *sk, struct socket *parent)
21321da177e4SLinus Torvalds {
21330ffdaf5bSSowmini Varadhan 	WARN_ON(parent->sk);
21341da177e4SLinus Torvalds 	write_lock_bh(&sk->sk_callback_lock);
2135333f7909SAl Viro 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
21361da177e4SLinus Torvalds 	parent->sk = sk;
2137972692e0SDavid S. Miller 	sk_set_socket(sk, parent);
213886741ec2SLorenzo Colitti 	sk->sk_uid = SOCK_INODE(parent)->i_uid;
21394237c75cSVenkat Yekkirala 	security_sock_graft(sk, parent);
21401da177e4SLinus Torvalds 	write_unlock_bh(&sk->sk_callback_lock);
21411da177e4SLinus Torvalds }
21421da177e4SLinus Torvalds 
214369336bd2SJoe Perches kuid_t sock_i_uid(struct sock *sk);
214425a9c8a4SKuniyuki Iwashima unsigned long __sock_i_ino(struct sock *sk);
214569336bd2SJoe Perches unsigned long sock_i_ino(struct sock *sk);
21461da177e4SLinus Torvalds 
sock_net_uid(const struct net * net,const struct sock * sk)214786741ec2SLorenzo Colitti static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
214886741ec2SLorenzo Colitti {
214986741ec2SLorenzo Colitti 	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
215086741ec2SLorenzo Colitti }
215186741ec2SLorenzo Colitti 
net_tx_rndhash(void)215258d607d3SEric Dumazet static inline u32 net_tx_rndhash(void)
215358d607d3SEric Dumazet {
2154a251c17aSJason A. Donenfeld 	u32 v = get_random_u32();
215558d607d3SEric Dumazet 
215658d607d3SEric Dumazet 	return v ?: 1;
215758d607d3SEric Dumazet }
215858d607d3SEric Dumazet 
sk_set_txhash(struct sock * sk)2159877d1f62STom Herbert static inline void sk_set_txhash(struct sock *sk)
2160877d1f62STom Herbert {
2161b71eaed8SEric Dumazet 	/* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
2162b71eaed8SEric Dumazet 	WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
2163877d1f62STom Herbert }
2164877d1f62STom Herbert 
sk_rethink_txhash(struct sock * sk)21659c30ae83SYuchung Cheng static inline bool sk_rethink_txhash(struct sock *sk)
2166265f94ffSTom Herbert {
216726859240SAkhmat Karakotov 	if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) {
2168265f94ffSTom Herbert 		sk_set_txhash(sk);
21699c30ae83SYuchung Cheng 		return true;
21709c30ae83SYuchung Cheng 	}
21719c30ae83SYuchung Cheng 	return false;
2172265f94ffSTom Herbert }
2173265f94ffSTom Herbert 
21741da177e4SLinus Torvalds static inline struct dst_entry *
__sk_dst_get(const struct sock * sk)217522c8e0b8SEric Dumazet __sk_dst_get(const struct sock *sk)
21761da177e4SLinus Torvalds {
21771e1d04e6SHannes Frederic Sowa 	return rcu_dereference_check(sk->sk_dst_cache,
21781e1d04e6SHannes Frederic Sowa 				     lockdep_sock_is_held(sk));
21791da177e4SLinus Torvalds }
21801da177e4SLinus Torvalds 
21811da177e4SLinus Torvalds static inline struct dst_entry *
sk_dst_get(const struct sock * sk)218222c8e0b8SEric Dumazet sk_dst_get(const struct sock *sk)
21831da177e4SLinus Torvalds {
21841da177e4SLinus Torvalds 	struct dst_entry *dst;
21851da177e4SLinus Torvalds 
2186b6c6712aSEric Dumazet 	rcu_read_lock();
2187b6c6712aSEric Dumazet 	dst = rcu_dereference(sk->sk_dst_cache);
2188bc9d3a9fSThomas Gleixner 	if (dst && !rcuref_get(&dst->__rcuref))
2189f8864972SEric Dumazet 		dst = NULL;
2190b6c6712aSEric Dumazet 	rcu_read_unlock();
21911da177e4SLinus Torvalds 	return dst;
21921da177e4SLinus Torvalds }
21931da177e4SLinus Torvalds 
__dst_negative_advice(struct sock * sk)21949c30ae83SYuchung Cheng static inline void __dst_negative_advice(struct sock *sk)
2195b6c6712aSEric Dumazet {
21965af198c3SEric Dumazet 	struct dst_entry *dst = __sk_dst_get(sk);
2197b6c6712aSEric Dumazet 
21985af198c3SEric Dumazet 	if (dst && dst->ops->negative_advice)
21995af198c3SEric Dumazet 		dst->ops->negative_advice(sk, dst);
2200b6c6712aSEric Dumazet }
2201b6c6712aSEric Dumazet 
dst_negative_advice(struct sock * sk)22029c30ae83SYuchung Cheng static inline void dst_negative_advice(struct sock *sk)
22039c30ae83SYuchung Cheng {
22049c30ae83SYuchung Cheng 	sk_rethink_txhash(sk);
22059c30ae83SYuchung Cheng 	__dst_negative_advice(sk);
22069c30ae83SYuchung Cheng }
22079c30ae83SYuchung Cheng 
22081da177e4SLinus Torvalds static inline void
__sk_dst_set(struct sock * sk,struct dst_entry * dst)22091da177e4SLinus Torvalds __sk_dst_set(struct sock *sk, struct dst_entry *dst)
22101da177e4SLinus Torvalds {
22111da177e4SLinus Torvalds 	struct dst_entry *old_dst;
22121da177e4SLinus Torvalds 
2213e022f0b4SKrishna Kumar 	sk_tx_queue_clear(sk);
221487324a50SEric Dumazet 	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
221595964c6dSEric Dumazet 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
221695964c6dSEric Dumazet 					    lockdep_sock_is_held(sk));
2217b6c6712aSEric Dumazet 	rcu_assign_pointer(sk->sk_dst_cache, dst);
22181da177e4SLinus Torvalds 	dst_release(old_dst);
22191da177e4SLinus Torvalds }
22201da177e4SLinus Torvalds 
22211da177e4SLinus Torvalds static inline void
sk_dst_set(struct sock * sk,struct dst_entry * dst)22221da177e4SLinus Torvalds sk_dst_set(struct sock *sk, struct dst_entry *dst)
22231da177e4SLinus Torvalds {
22247f502361SEric Dumazet 	struct dst_entry *old_dst;
22257f502361SEric Dumazet 
22267f502361SEric Dumazet 	sk_tx_queue_clear(sk);
222787324a50SEric Dumazet 	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
222870530a2fSEric Dumazet 	old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst)));
22297f502361SEric Dumazet 	dst_release(old_dst);
22301da177e4SLinus Torvalds }
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds static inline void
__sk_dst_reset(struct sock * sk)22331da177e4SLinus Torvalds __sk_dst_reset(struct sock *sk)
22341da177e4SLinus Torvalds {
2235b6c6712aSEric Dumazet 	__sk_dst_set(sk, NULL);
22361da177e4SLinus Torvalds }
22371da177e4SLinus Torvalds 
22381da177e4SLinus Torvalds static inline void
sk_dst_reset(struct sock * sk)22391da177e4SLinus Torvalds sk_dst_reset(struct sock *sk)
22401da177e4SLinus Torvalds {
22417f502361SEric Dumazet 	sk_dst_set(sk, NULL);
22421da177e4SLinus Torvalds }
22431da177e4SLinus Torvalds 
224469336bd2SJoe Perches struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
22451da177e4SLinus Torvalds 
224669336bd2SJoe Perches struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
22471da177e4SLinus Torvalds 
sk_dst_confirm(struct sock * sk)22489b8805a3SJulian Anastasov static inline void sk_dst_confirm(struct sock *sk)
22499b8805a3SJulian Anastasov {
225025c7a6d1SEric Dumazet 	if (!READ_ONCE(sk->sk_dst_pending_confirm))
225125c7a6d1SEric Dumazet 		WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
22529b8805a3SJulian Anastasov }
22539b8805a3SJulian Anastasov 
sock_confirm_neigh(struct sk_buff * skb,struct neighbour * n)22544ff06203SJulian Anastasov static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
22554ff06203SJulian Anastasov {
22564ff06203SJulian Anastasov 	if (skb_get_dst_pending_confirm(skb)) {
22574ff06203SJulian Anastasov 		struct sock *sk = skb->sk;
22584ff06203SJulian Anastasov 
225925c7a6d1SEric Dumazet 		if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
226025c7a6d1SEric Dumazet 			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
22611e84dc6bSYajun Deng 		neigh_confirm(n);
22624ff06203SJulian Anastasov 	}
22634ff06203SJulian Anastasov }
22644ff06203SJulian Anastasov 
2265f60e5990Shannes@stressinduktion.org bool sk_mc_loop(struct sock *sk);
2266f60e5990Shannes@stressinduktion.org 
sk_can_gso(const struct sock * sk)2267dc6b9b78SEric Dumazet static inline bool sk_can_gso(const struct sock *sk)
2268bcd76111SHerbert Xu {
2269bcd76111SHerbert Xu 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2270bcd76111SHerbert Xu }
2271bcd76111SHerbert Xu 
227269336bd2SJoe Perches void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
22736cbb0df7SArnaldo Carvalho de Melo 
sk_gso_disable(struct sock * sk)2274aba54656SEric Dumazet static inline void sk_gso_disable(struct sock *sk)
2275a465419bSEric Dumazet {
2276aba54656SEric Dumazet 	sk->sk_gso_disabled = 1;
2277aba54656SEric Dumazet 	sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2278a465419bSEric Dumazet }
2279a465419bSEric Dumazet 
skb_do_copy_data_nocache(struct sock * sk,struct sk_buff * skb,struct iov_iter * from,char * to,int copy,int offset)2280c6e1a0d1STom Herbert static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
228157be5bdaSAl Viro 					   struct iov_iter *from, char *to,
2282912d398dSWei Yongjun 					   int copy, int offset)
2283c6e1a0d1STom Herbert {
2284c6e1a0d1STom Herbert 	if (skb->ip_summed == CHECKSUM_NONE) {
228557be5bdaSAl Viro 		__wsum csum = 0;
228615e6cb46SAl Viro 		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
228757be5bdaSAl Viro 			return -EFAULT;
2288912d398dSWei Yongjun 		skb->csum = csum_block_add(skb->csum, csum, offset);
2289c6e1a0d1STom Herbert 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
229015e6cb46SAl Viro 		if (!copy_from_iter_full_nocache(to, copy, from))
2291c6e1a0d1STom Herbert 			return -EFAULT;
229215e6cb46SAl Viro 	} else if (!copy_from_iter_full(to, copy, from))
2293c6e1a0d1STom Herbert 		return -EFAULT;
2294c6e1a0d1STom Herbert 
2295c6e1a0d1STom Herbert 	return 0;
2296c6e1a0d1STom Herbert }
2297c6e1a0d1STom Herbert 
skb_add_data_nocache(struct sock * sk,struct sk_buff * skb,struct iov_iter * from,int copy)2298c6e1a0d1STom Herbert static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
229957be5bdaSAl Viro 				       struct iov_iter *from, int copy)
2300c6e1a0d1STom Herbert {
2301912d398dSWei Yongjun 	int err, offset = skb->len;
2302c6e1a0d1STom Herbert 
2303912d398dSWei Yongjun 	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2304912d398dSWei Yongjun 				       copy, offset);
2305c6e1a0d1STom Herbert 	if (err)
2306912d398dSWei Yongjun 		__skb_trim(skb, offset);
2307c6e1a0d1STom Herbert 
2308c6e1a0d1STom Herbert 	return err;
2309c6e1a0d1STom Herbert }
2310c6e1a0d1STom Herbert 
skb_copy_to_page_nocache(struct sock * sk,struct iov_iter * from,struct sk_buff * skb,struct page * page,int off,int copy)231157be5bdaSAl Viro static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2312c6e1a0d1STom Herbert 					   struct sk_buff *skb,
2313c6e1a0d1STom Herbert 					   struct page *page,
2314c6e1a0d1STom Herbert 					   int off, int copy)
2315c6e1a0d1STom Herbert {
2316c6e1a0d1STom Herbert 	int err;
2317c6e1a0d1STom Herbert 
2318912d398dSWei Yongjun 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2319912d398dSWei Yongjun 				       copy, skb->len);
2320c6e1a0d1STom Herbert 	if (err)
2321c6e1a0d1STom Herbert 		return err;
2322c6e1a0d1STom Herbert 
2323ede57d58SRichard Gobert 	skb_len_add(skb, copy);
2324ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, copy);
2325c6e1a0d1STom Herbert 	sk_mem_charge(sk, copy);
2326c6e1a0d1STom Herbert 	return 0;
2327c6e1a0d1STom Herbert }
2328c6e1a0d1STom Herbert 
2329c564039fSEric Dumazet /**
2330c564039fSEric Dumazet  * sk_wmem_alloc_get - returns write allocations
2331c564039fSEric Dumazet  * @sk: socket
2332c564039fSEric Dumazet  *
233366256e0bSRandy Dunlap  * Return: sk_wmem_alloc minus initial offset of one
2334c564039fSEric Dumazet  */
sk_wmem_alloc_get(const struct sock * sk)2335c564039fSEric Dumazet static inline int sk_wmem_alloc_get(const struct sock *sk)
2336c564039fSEric Dumazet {
233714afee4bSReshetova, Elena 	return refcount_read(&sk->sk_wmem_alloc) - 1;
2338c564039fSEric Dumazet }
2339c564039fSEric Dumazet 
2340c564039fSEric Dumazet /**
2341c564039fSEric Dumazet  * sk_rmem_alloc_get - returns read allocations
2342c564039fSEric Dumazet  * @sk: socket
2343c564039fSEric Dumazet  *
234466256e0bSRandy Dunlap  * Return: sk_rmem_alloc
2345c564039fSEric Dumazet  */
sk_rmem_alloc_get(const struct sock * sk)2346c564039fSEric Dumazet static inline int sk_rmem_alloc_get(const struct sock *sk)
2347c564039fSEric Dumazet {
2348c564039fSEric Dumazet 	return atomic_read(&sk->sk_rmem_alloc);
2349c564039fSEric Dumazet }
2350c564039fSEric Dumazet 
2351c564039fSEric Dumazet /**
2352c564039fSEric Dumazet  * sk_has_allocations - check if allocations are outstanding
2353c564039fSEric Dumazet  * @sk: socket
2354c564039fSEric Dumazet  *
235566256e0bSRandy Dunlap  * Return: true if socket has write or read allocations
2356c564039fSEric Dumazet  */
sk_has_allocations(const struct sock * sk)2357dc6b9b78SEric Dumazet static inline bool sk_has_allocations(const struct sock *sk)
2358c564039fSEric Dumazet {
2359c564039fSEric Dumazet 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2360c564039fSEric Dumazet }
2361c564039fSEric Dumazet 
2362a57de0b4SJiri Olsa /**
23631ce0bf50SHerbert Xu  * skwq_has_sleeper - check if there are any waiting processes
2364acfbe96aSRandy Dunlap  * @wq: struct socket_wq
2365a57de0b4SJiri Olsa  *
236666256e0bSRandy Dunlap  * Return: true if socket_wq has waiting processes
2367a57de0b4SJiri Olsa  *
23681ce0bf50SHerbert Xu  * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
2369a57de0b4SJiri Olsa  * barrier call. They were added due to the race found within the tcp code.
2370a57de0b4SJiri Olsa  *
2371d651983dSMauro Carvalho Chehab  * Consider following tcp code paths::
2372a57de0b4SJiri Olsa  *
2373a57de0b4SJiri Olsa  *   CPU1                CPU2
2374a57de0b4SJiri Olsa  *   sys_select          receive packet
2375a57de0b4SJiri Olsa  *   ...                 ...
2376a57de0b4SJiri Olsa  *   __add_wait_queue    update tp->rcv_nxt
2377a57de0b4SJiri Olsa  *   ...                 ...
2378a57de0b4SJiri Olsa  *   tp->rcv_nxt check   sock_def_readable
2379a57de0b4SJiri Olsa  *   ...                 {
238043815482SEric Dumazet  *   schedule               rcu_read_lock();
238143815482SEric Dumazet  *                          wq = rcu_dereference(sk->sk_wq);
238243815482SEric Dumazet  *                          if (wq && waitqueue_active(&wq->wait))
238343815482SEric Dumazet  *                              wake_up_interruptible(&wq->wait)
2384a57de0b4SJiri Olsa  *                          ...
2385a57de0b4SJiri Olsa  *                       }
2386a57de0b4SJiri Olsa  *
2387a57de0b4SJiri Olsa  * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
2388a57de0b4SJiri Olsa  * in its cache, and so does the tp->rcv_nxt update on CPU2 side.  The CPU1
2389a57de0b4SJiri Olsa  * could then endup calling schedule and sleep forever if there are no more
2390a57de0b4SJiri Olsa  * data on the socket.
2391ad462769SJiri Olsa  *
2392a57de0b4SJiri Olsa  */
skwq_has_sleeper(struct socket_wq * wq)23931ce0bf50SHerbert Xu static inline bool skwq_has_sleeper(struct socket_wq *wq)
2394a57de0b4SJiri Olsa {
23951ce0bf50SHerbert Xu 	return wq && wq_has_sleeper(&wq->wait);
2396a57de0b4SJiri Olsa }
2397a57de0b4SJiri Olsa 
2398a57de0b4SJiri Olsa /**
2399a57de0b4SJiri Olsa  * sock_poll_wait - place memory barrier behind the poll_wait call.
2400a57de0b4SJiri Olsa  * @filp:           file
240189ab066dSKarsten Graul  * @sock:           socket to wait on
2402a57de0b4SJiri Olsa  * @p:              poll_table
2403a57de0b4SJiri Olsa  *
240443815482SEric Dumazet  * See the comments in the wq_has_sleeper function.
2405a57de0b4SJiri Olsa  */
sock_poll_wait(struct file * filp,struct socket * sock,poll_table * p)240689ab066dSKarsten Graul static inline void sock_poll_wait(struct file *filp, struct socket *sock,
240789ab066dSKarsten Graul 				  poll_table *p)
2408a57de0b4SJiri Olsa {
2409d8bbd13bSChristoph Hellwig 	if (!poll_does_not_wait(p)) {
2410333f7909SAl Viro 		poll_wait(filp, &sock->wq.wait, p);
2411dc6b9b78SEric Dumazet 		/* We need to be sure we are in sync with the
2412a57de0b4SJiri Olsa 		 * socket flags modification.
2413a57de0b4SJiri Olsa 		 *
241443815482SEric Dumazet 		 * This memory barrier is paired in the wq_has_sleeper.
2415a57de0b4SJiri Olsa 		 */
2416a57de0b4SJiri Olsa 		smp_mb();
2417a57de0b4SJiri Olsa 	}
2418a57de0b4SJiri Olsa }
2419a57de0b4SJiri Olsa 
skb_set_hash_from_sk(struct sk_buff * skb,struct sock * sk)2420b73c3d0eSTom Herbert static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2421b73c3d0eSTom Herbert {
2422b71eaed8SEric Dumazet 	/* This pairs with WRITE_ONCE() in sk_set_txhash() */
2423b71eaed8SEric Dumazet 	u32 txhash = READ_ONCE(sk->sk_txhash);
2424b71eaed8SEric Dumazet 
2425b71eaed8SEric Dumazet 	if (txhash) {
2426b73c3d0eSTom Herbert 		skb->l4_hash = 1;
2427b71eaed8SEric Dumazet 		skb->hash = txhash;
2428b73c3d0eSTom Herbert 	}
2429b73c3d0eSTom Herbert }
2430b73c3d0eSTom Herbert 
24319e17f8a4SEric Dumazet void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
24329e17f8a4SEric Dumazet 
24331da177e4SLinus Torvalds /*
24341da177e4SLinus Torvalds  *	Queue a received datagram if it will fit. Stream and sequenced
24351da177e4SLinus Torvalds  *	protocols can't normally use this as they need to fit buffers in
24361da177e4SLinus Torvalds  *	and play with them.
24371da177e4SLinus Torvalds  *
24381da177e4SLinus Torvalds  *	Inlined as it's very short and called for pretty much every
24391da177e4SLinus Torvalds  *	packet ever received.
24401da177e4SLinus Torvalds  */
skb_set_owner_r(struct sk_buff * skb,struct sock * sk)24411da177e4SLinus Torvalds static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
24421da177e4SLinus Torvalds {
2443d55d87fdSHerbert Xu 	skb_orphan(skb);
24441da177e4SLinus Torvalds 	skb->sk = sk;
24451da177e4SLinus Torvalds 	skb->destructor = sock_rfree;
24461da177e4SLinus Torvalds 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
24473ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
24481da177e4SLinus Torvalds }
24491da177e4SLinus Torvalds 
skb_set_owner_sk_safe(struct sk_buff * skb,struct sock * sk)2450098116e7SPaolo Abeni static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
24519adc89afSPaolo Abeni {
24529adc89afSPaolo Abeni 	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
24539adc89afSPaolo Abeni 		skb_orphan(skb);
24549adc89afSPaolo Abeni 		skb->destructor = sock_efree;
24559adc89afSPaolo Abeni 		skb->sk = sk;
2456098116e7SPaolo Abeni 		return true;
24579adc89afSPaolo Abeni 	}
2458098116e7SPaolo Abeni 	return false;
24599adc89afSPaolo Abeni }
24609adc89afSPaolo Abeni 
skb_clone_and_charge_r(struct sk_buff * skb,struct sock * sk)2461ca43ccf4SKuniyuki Iwashima static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
2462ca43ccf4SKuniyuki Iwashima {
2463ca43ccf4SKuniyuki Iwashima 	skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
2464ca43ccf4SKuniyuki Iwashima 	if (skb) {
2465ca43ccf4SKuniyuki Iwashima 		if (sk_rmem_schedule(sk, skb, skb->truesize)) {
2466ca43ccf4SKuniyuki Iwashima 			skb_set_owner_r(skb, sk);
2467ca43ccf4SKuniyuki Iwashima 			return skb;
2468ca43ccf4SKuniyuki Iwashima 		}
2469ca43ccf4SKuniyuki Iwashima 		__kfree_skb(skb);
2470ca43ccf4SKuniyuki Iwashima 	}
2471ca43ccf4SKuniyuki Iwashima 	return NULL;
2472ca43ccf4SKuniyuki Iwashima }
2473ca43ccf4SKuniyuki Iwashima 
skb_prepare_for_gro(struct sk_buff * skb)24745e10da53SPaolo Abeni static inline void skb_prepare_for_gro(struct sk_buff *skb)
24755e10da53SPaolo Abeni {
24765e10da53SPaolo Abeni 	if (skb->destructor != sock_wfree) {
24775e10da53SPaolo Abeni 		skb_orphan(skb);
24785e10da53SPaolo Abeni 		return;
24795e10da53SPaolo Abeni 	}
24805e10da53SPaolo Abeni 	skb->slow_gro = 1;
24815e10da53SPaolo Abeni }
24825e10da53SPaolo Abeni 
248369336bd2SJoe Perches void sk_reset_timer(struct sock *sk, struct timer_list *timer,
24841da177e4SLinus Torvalds 		    unsigned long expires);
24851da177e4SLinus Torvalds 
248669336bd2SJoe Perches void sk_stop_timer(struct sock *sk, struct timer_list *timer);
24871da177e4SLinus Torvalds 
248808b81d87SGeliang Tang void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
248908b81d87SGeliang Tang 
249065101aecSPaolo Abeni int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
249165101aecSPaolo Abeni 			struct sk_buff *skb, unsigned int flags,
249269629464SEric Dumazet 			void (*destructor)(struct sock *sk,
249369629464SEric Dumazet 					   struct sk_buff *skb));
2494e6afc8acSsamanthakumar int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2495c1b8a567SMenglong Dong 
2496c1b8a567SMenglong Dong int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
2497c1b8a567SMenglong Dong 			      enum skb_drop_reason *reason);
2498c1b8a567SMenglong Dong 
sock_queue_rcv_skb(struct sock * sk,struct sk_buff * skb)2499c1b8a567SMenglong Dong static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2500c1b8a567SMenglong Dong {
2501c1b8a567SMenglong Dong 	return sock_queue_rcv_skb_reason(sk, skb, NULL);
2502c1b8a567SMenglong Dong }
25031da177e4SLinus Torvalds 
250469336bd2SJoe Perches int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2505364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
25061da177e4SLinus Torvalds 
25071da177e4SLinus Torvalds /*
25081da177e4SLinus Torvalds  *	Recover an error report and clear atomically
25091da177e4SLinus Torvalds  */
25101da177e4SLinus Torvalds 
sock_error(struct sock * sk)25111da177e4SLinus Torvalds static inline int sock_error(struct sock *sk)
25121da177e4SLinus Torvalds {
2513c1cbe4b7SBenjamin LaHaise 	int err;
2514f13ef100SEric Dumazet 
2515f13ef100SEric Dumazet 	/* Avoid an atomic operation for the common case.
2516f13ef100SEric Dumazet 	 * This is racy since another cpu/thread can change sk_err under us.
2517f13ef100SEric Dumazet 	 */
2518f13ef100SEric Dumazet 	if (likely(data_race(!sk->sk_err)))
2519c1cbe4b7SBenjamin LaHaise 		return 0;
2520f13ef100SEric Dumazet 
2521c1cbe4b7SBenjamin LaHaise 	err = xchg(&sk->sk_err, 0);
25221da177e4SLinus Torvalds 	return -err;
25231da177e4SLinus Torvalds }
25241da177e4SLinus Torvalds 
2525e3ae2365SAlexander Aring void sk_error_report(struct sock *sk);
2526e3ae2365SAlexander Aring 
sock_wspace(struct sock * sk)25271da177e4SLinus Torvalds static inline unsigned long sock_wspace(struct sock *sk)
25281da177e4SLinus Torvalds {
25291da177e4SLinus Torvalds 	int amt = 0;
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
253214afee4bSReshetova, Elena 		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
25331da177e4SLinus Torvalds 		if (amt < 0)
25341da177e4SLinus Torvalds 			amt = 0;
25351da177e4SLinus Torvalds 	}
25361da177e4SLinus Torvalds 	return amt;
25371da177e4SLinus Torvalds }
25381da177e4SLinus Torvalds 
2539ceb5d58bSEric Dumazet /* Note:
2540ceb5d58bSEric Dumazet  *  We use sk->sk_wq_raw, from contexts knowing this
2541ceb5d58bSEric Dumazet  *  pointer is not NULL and cannot disappear/change.
2542ceb5d58bSEric Dumazet  */
sk_set_bit(int nr,struct sock * sk)25439cd3e072SEric Dumazet static inline void sk_set_bit(int nr, struct sock *sk)
25441da177e4SLinus Torvalds {
25454be73522SEric Dumazet 	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
25464be73522SEric Dumazet 	    !sock_flag(sk, SOCK_FASYNC))
25479317bb69SEric Dumazet 		return;
25489317bb69SEric Dumazet 
2549ceb5d58bSEric Dumazet 	set_bit(nr, &sk->sk_wq_raw->flags);
25509cd3e072SEric Dumazet }
25519cd3e072SEric Dumazet 
sk_clear_bit(int nr,struct sock * sk)25529cd3e072SEric Dumazet static inline void sk_clear_bit(int nr, struct sock *sk)
25539cd3e072SEric Dumazet {
25544be73522SEric Dumazet 	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
25554be73522SEric Dumazet 	    !sock_flag(sk, SOCK_FASYNC))
25569317bb69SEric Dumazet 		return;
25579317bb69SEric Dumazet 
2558ceb5d58bSEric Dumazet 	clear_bit(nr, &sk->sk_wq_raw->flags);
25599cd3e072SEric Dumazet }
25609cd3e072SEric Dumazet 
sk_wake_async(const struct sock * sk,int how,int band)2561ceb5d58bSEric Dumazet static inline void sk_wake_async(const struct sock *sk, int how, int band)
25621da177e4SLinus Torvalds {
2563ceb5d58bSEric Dumazet 	if (sock_flag(sk, SOCK_FASYNC)) {
2564ceb5d58bSEric Dumazet 		rcu_read_lock();
2565ceb5d58bSEric Dumazet 		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2566ceb5d58bSEric Dumazet 		rcu_read_unlock();
2567ceb5d58bSEric Dumazet 	}
25681da177e4SLinus Torvalds }
25691da177e4SLinus Torvalds 
2570eea86af6SDaniel Borkmann /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
2571eea86af6SDaniel Borkmann  * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
2572eea86af6SDaniel Borkmann  * Note: for send buffers, TCP works better if we can build two skbs at
2573eea86af6SDaniel Borkmann  * minimum.
25747a91b434SEric Dumazet  */
25759eb5bf83SEric Dumazet #define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2576eea86af6SDaniel Borkmann 
2577eea86af6SDaniel Borkmann #define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
2578eea86af6SDaniel Borkmann #define SOCK_MIN_RCVBUF		 TCP_SKB_MIN_TRUESIZE
25791da177e4SLinus Torvalds 
sk_stream_moderate_sndbuf(struct sock * sk)25801da177e4SLinus Torvalds static inline void sk_stream_moderate_sndbuf(struct sock *sk)
25811da177e4SLinus Torvalds {
2582e292f05eSEric Dumazet 	u32 val;
2583e292f05eSEric Dumazet 
2584e292f05eSEric Dumazet 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2585e292f05eSEric Dumazet 		return;
2586e292f05eSEric Dumazet 
2587e292f05eSEric Dumazet 	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2588ca057051SWei Wang 	val = max_t(u32, val, sk_unused_reserved_mem(sk));
2589e292f05eSEric Dumazet 
2590e292f05eSEric Dumazet 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
25911da177e4SLinus Torvalds }
25921da177e4SLinus Torvalds 
25935640f768SEric Dumazet /**
25945640f768SEric Dumazet  * sk_page_frag - return an appropriate page_frag
25955640f768SEric Dumazet  * @sk: socket
25965640f768SEric Dumazet  *
259720eb4f29STejun Heo  * Use the per task page_frag instead of the per socket one for
2598dacb5d88SPaolo Abeni  * optimization when we know that we're in process context and own
259920eb4f29STejun Heo  * everything that's associated with %current.
260020eb4f29STejun Heo  *
2601dacb5d88SPaolo Abeni  * Both direct reclaim and page faults can nest inside other
2602dacb5d88SPaolo Abeni  * socket operations and end up recursing into sk_page_frag()
2603dacb5d88SPaolo Abeni  * while it's already in use: explicitly avoid task page_frag
260408f65892SBenjamin Coddington  * when users disable sk_use_task_frag.
260566256e0bSRandy Dunlap  *
260666256e0bSRandy Dunlap  * Return: a per task page_frag if context allows that,
260766256e0bSRandy Dunlap  * otherwise a per socket one.
26085640f768SEric Dumazet  */
sk_page_frag(struct sock * sk)26095640f768SEric Dumazet static inline struct page_frag *sk_page_frag(struct sock *sk)
26101da177e4SLinus Torvalds {
261108f65892SBenjamin Coddington 	if (sk->sk_use_task_frag)
26125640f768SEric Dumazet 		return &current->task_frag;
26131da177e4SLinus Torvalds 
26145640f768SEric Dumazet 	return &sk->sk_frag;
26151da177e4SLinus Torvalds }
26165640f768SEric Dumazet 
261769336bd2SJoe Perches bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
26181da177e4SLinus Torvalds 
26191da177e4SLinus Torvalds /*
26201da177e4SLinus Torvalds  *	Default write policy as shown to user space via poll/select/SIGIO
26211da177e4SLinus Torvalds  */
sock_writeable(const struct sock * sk)2622dc6b9b78SEric Dumazet static inline bool sock_writeable(const struct sock *sk)
26231da177e4SLinus Torvalds {
2624e292f05eSEric Dumazet 	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
26251da177e4SLinus Torvalds }
26261da177e4SLinus Torvalds 
gfp_any(void)2627dd0fc66fSAl Viro static inline gfp_t gfp_any(void)
26281da177e4SLinus Torvalds {
262999709372SAndrew Morton 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
26301da177e4SLinus Torvalds }
26311da177e4SLinus Torvalds 
gfp_memcg_charge(void)26324b1327beSWei Wang static inline gfp_t gfp_memcg_charge(void)
26334b1327beSWei Wang {
2634720ca52bSJakub Kicinski 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
26354b1327beSWei Wang }
26364b1327beSWei Wang 
sock_rcvtimeo(const struct sock * sk,bool noblock)2637dc6b9b78SEric Dumazet static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
26381da177e4SLinus Torvalds {
26391da177e4SLinus Torvalds 	return noblock ? 0 : sk->sk_rcvtimeo;
26401da177e4SLinus Torvalds }
26411da177e4SLinus Torvalds 
sock_sndtimeo(const struct sock * sk,bool noblock)2642dc6b9b78SEric Dumazet static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
26431da177e4SLinus Torvalds {
26441da177e4SLinus Torvalds 	return noblock ? 0 : sk->sk_sndtimeo;
26451da177e4SLinus Torvalds }
26461da177e4SLinus Torvalds 
sock_rcvlowat(const struct sock * sk,int waitall,int len)26471da177e4SLinus Torvalds static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
26481da177e4SLinus Torvalds {
2649eac66402SEric Dumazet 	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
2650eac66402SEric Dumazet 
2651eac66402SEric Dumazet 	return v ?: 1;
26521da177e4SLinus Torvalds }
26531da177e4SLinus Torvalds 
26541da177e4SLinus Torvalds /* Alas, with timeout socket operations are not restartable.
26551da177e4SLinus Torvalds  * Compare this to poll().
26561da177e4SLinus Torvalds  */
sock_intr_errno(long timeo)26571da177e4SLinus Torvalds static inline int sock_intr_errno(long timeo)
26581da177e4SLinus Torvalds {
26591da177e4SLinus Torvalds 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
26601da177e4SLinus Torvalds }
26611da177e4SLinus Torvalds 
2662744d5a3eSEyal Birger struct sock_skb_cb {
2663744d5a3eSEyal Birger 	u32 dropcount;
2664744d5a3eSEyal Birger };
2665744d5a3eSEyal Birger 
2666744d5a3eSEyal Birger /* Store sock_skb_cb at the end of skb->cb[] so protocol families
2667744d5a3eSEyal Birger  * using skb->cb[] would keep using it directly and utilize its
2668744d5a3eSEyal Birger  * alignement guarantee.
2669744d5a3eSEyal Birger  */
2670c593642cSPankaj Bharadiya #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
2671744d5a3eSEyal Birger 			    sizeof(struct sock_skb_cb)))
2672744d5a3eSEyal Birger 
2673744d5a3eSEyal Birger #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
2674744d5a3eSEyal Birger 			    SOCK_SKB_CB_OFFSET))
2675744d5a3eSEyal Birger 
2676b4772ef8SEyal Birger #define sock_skb_cb_check_size(size) \
2677744d5a3eSEyal Birger 	BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
2678b4772ef8SEyal Birger 
26793bc3b96fSEyal Birger static inline void
sock_skb_set_dropcount(const struct sock * sk,struct sk_buff * skb)26803bc3b96fSEyal Birger sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
26813bc3b96fSEyal Birger {
26823665f381SEric Dumazet 	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
26833665f381SEric Dumazet 						atomic_read(&sk->sk_drops) : 0;
26843bc3b96fSEyal Birger }
26853bc3b96fSEyal Birger 
sk_drops_add(struct sock * sk,const struct sk_buff * skb)2686532182cdSEric Dumazet static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2687532182cdSEric Dumazet {
2688532182cdSEric Dumazet 	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2689532182cdSEric Dumazet 
2690532182cdSEric Dumazet 	atomic_add(segs, &sk->sk_drops);
2691532182cdSEric Dumazet }
2692532182cdSEric Dumazet 
sock_read_timestamp(struct sock * sk)26933a0ed3e9SDeepa Dinamani static inline ktime_t sock_read_timestamp(struct sock *sk)
26943a0ed3e9SDeepa Dinamani {
26953a0ed3e9SDeepa Dinamani #if BITS_PER_LONG==32
26963a0ed3e9SDeepa Dinamani 	unsigned int seq;
26973a0ed3e9SDeepa Dinamani 	ktime_t kt;
26983a0ed3e9SDeepa Dinamani 
26993a0ed3e9SDeepa Dinamani 	do {
27003a0ed3e9SDeepa Dinamani 		seq = read_seqbegin(&sk->sk_stamp_seq);
27013a0ed3e9SDeepa Dinamani 		kt = sk->sk_stamp;
27023a0ed3e9SDeepa Dinamani 	} while (read_seqretry(&sk->sk_stamp_seq, seq));
27033a0ed3e9SDeepa Dinamani 
27043a0ed3e9SDeepa Dinamani 	return kt;
27053a0ed3e9SDeepa Dinamani #else
2706f75359f3SEric Dumazet 	return READ_ONCE(sk->sk_stamp);
27073a0ed3e9SDeepa Dinamani #endif
27083a0ed3e9SDeepa Dinamani }
27093a0ed3e9SDeepa Dinamani 
sock_write_timestamp(struct sock * sk,ktime_t kt)27103a0ed3e9SDeepa Dinamani static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
27113a0ed3e9SDeepa Dinamani {
27123a0ed3e9SDeepa Dinamani #if BITS_PER_LONG==32
27133a0ed3e9SDeepa Dinamani 	write_seqlock(&sk->sk_stamp_seq);
27143a0ed3e9SDeepa Dinamani 	sk->sk_stamp = kt;
27153a0ed3e9SDeepa Dinamani 	write_sequnlock(&sk->sk_stamp_seq);
27163a0ed3e9SDeepa Dinamani #else
2717f75359f3SEric Dumazet 	WRITE_ONCE(sk->sk_stamp, kt);
27183a0ed3e9SDeepa Dinamani #endif
27193a0ed3e9SDeepa Dinamani }
27203a0ed3e9SDeepa Dinamani 
272169336bd2SJoe Perches void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
272292f37fd2SEric Dumazet 			   struct sk_buff *skb);
272369336bd2SJoe Perches void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
27246e3e939fSJohannes Berg 			     struct sk_buff *skb);
272592f37fd2SEric Dumazet 
2726dc6b9b78SEric Dumazet static inline void
sock_recv_timestamp(struct msghdr * msg,struct sock * sk,struct sk_buff * skb)27271da177e4SLinus Torvalds sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
27281da177e4SLinus Torvalds {
272920d49473SPatrick Ohly 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2730e3390b30SEric Dumazet 	u32 tsflags = READ_ONCE(sk->sk_tsflags);
2731e3390b30SEric Dumazet 	ktime_t kt = skb->tstamp;
273220d49473SPatrick Ohly 	/*
273320d49473SPatrick Ohly 	 * generate control messages if
2734b9f40e21SWillem de Bruijn 	 * - receive time stamping in software requested
273520d49473SPatrick Ohly 	 * - software time stamp available and wanted
273620d49473SPatrick Ohly 	 * - hardware time stamps available and wanted
273720d49473SPatrick Ohly 	 */
273820d49473SPatrick Ohly 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2739e3390b30SEric Dumazet 	    (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2740e3390b30SEric Dumazet 	    (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
27412456e855SThomas Gleixner 	    (hwtstamps->hwtstamp &&
2742e3390b30SEric Dumazet 	     (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
274392f37fd2SEric Dumazet 		__sock_recv_timestamp(msg, sk, skb);
274492f37fd2SEric Dumazet 	else
27453a0ed3e9SDeepa Dinamani 		sock_write_timestamp(sk, kt);
27466e3e939fSJohannes Berg 
2747eb6fba75SJakub Kicinski 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb_wifi_acked_valid(skb))
27486e3e939fSJohannes Berg 		__sock_recv_wifi_status(msg, sk, skb);
27491da177e4SLinus Torvalds }
27501da177e4SLinus Torvalds 
27516fd1d51cSErin MacNeil void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2752767dd033SEric Dumazet 		       struct sk_buff *skb);
2753767dd033SEric Dumazet 
27546c7c98baSPaolo Abeni #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
sock_recv_cmsgs(struct msghdr * msg,struct sock * sk,struct sk_buff * skb)27556fd1d51cSErin MacNeil static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2756767dd033SEric Dumazet 				   struct sk_buff *skb)
2757767dd033SEric Dumazet {
27586fd1d51cSErin MacNeil #define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL)			| \
27596fd1d51cSErin MacNeil 			   (1UL << SOCK_RCVTSTAMP)			| \
27606fd1d51cSErin MacNeil 			   (1UL << SOCK_RCVMARK))
2761b9f40e21SWillem de Bruijn #define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
2762b9f40e21SWillem de Bruijn 			   SOF_TIMESTAMPING_RAW_HARDWARE)
2763767dd033SEric Dumazet 
2764e3390b30SEric Dumazet 	if (sk->sk_flags & FLAGS_RECV_CMSGS ||
2765e3390b30SEric Dumazet 	    READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
27666fd1d51cSErin MacNeil 		__sock_recv_cmsgs(msg, sk, skb);
2767d3fbff30SEric Dumazet 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
27683a0ed3e9SDeepa Dinamani 		sock_write_timestamp(sk, skb->tstamp);
2769dfd9248cSKuniyuki Iwashima 	else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
27703a0ed3e9SDeepa Dinamani 		sock_write_timestamp(sk, 0);
2771767dd033SEric Dumazet }
27723b885787SNeil Horman 
2773c14ac945SSoheil Hassas Yeganeh void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
277467cc0d40SWillem de Bruijn 
27751da177e4SLinus Torvalds /**
27768f932f76SWillem de Bruijn  * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
277720d49473SPatrick Ohly  * @sk:		socket sending this packet
2778c14ac945SSoheil Hassas Yeganeh  * @tsflags:	timestamping flags to use
2779140c55d4SEric Dumazet  * @tx_flags:	completed with instructions for time stamping
27808f932f76SWillem de Bruijn  * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
2781140c55d4SEric Dumazet  *
2782d651983dSMauro Carvalho Chehab  * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
278320d49473SPatrick Ohly  */
_sock_tx_timestamp(struct sock * sk,__u16 tsflags,__u8 * tx_flags,__u32 * tskey)27848f932f76SWillem de Bruijn static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
27858f932f76SWillem de Bruijn 				      __u8 *tx_flags, __u32 *tskey)
278667cc0d40SWillem de Bruijn {
27878f932f76SWillem de Bruijn 	if (unlikely(tsflags)) {
2788c14ac945SSoheil Hassas Yeganeh 		__sock_tx_timestamp(tsflags, tx_flags);
27898f932f76SWillem de Bruijn 		if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
27908f932f76SWillem de Bruijn 		    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2791a1cdec57SEric Dumazet 			*tskey = atomic_inc_return(&sk->sk_tskey) - 1;
27928f932f76SWillem de Bruijn 	}
279367cc0d40SWillem de Bruijn 	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
279467cc0d40SWillem de Bruijn 		*tx_flags |= SKBTX_WIFI_STATUS;
279567cc0d40SWillem de Bruijn }
279620d49473SPatrick Ohly 
sock_tx_timestamp(struct sock * sk,__u16 tsflags,__u8 * tx_flags)27978f932f76SWillem de Bruijn static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
27988f932f76SWillem de Bruijn 				     __u8 *tx_flags)
27998f932f76SWillem de Bruijn {
28008f932f76SWillem de Bruijn 	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
28018f932f76SWillem de Bruijn }
28028f932f76SWillem de Bruijn 
skb_setup_tx_timestamp(struct sk_buff * skb,__u16 tsflags)28038f932f76SWillem de Bruijn static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
28048f932f76SWillem de Bruijn {
28058f932f76SWillem de Bruijn 	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
28068f932f76SWillem de Bruijn 			   &skb_shinfo(skb)->tskey);
28078f932f76SWillem de Bruijn }
28088f932f76SWillem de Bruijn 
sk_is_inet(const struct sock * sk)2809ef8ad307SEric Dumazet static inline bool sk_is_inet(const struct sock *sk)
2810ef8ad307SEric Dumazet {
2811ef8ad307SEric Dumazet 	int family = READ_ONCE(sk->sk_family);
2812ef8ad307SEric Dumazet 
2813ef8ad307SEric Dumazet 	return family == AF_INET || family == AF_INET6;
2814ef8ad307SEric Dumazet }
2815ef8ad307SEric Dumazet 
sk_is_tcp(const struct sock * sk)281642f67eeaSEric Dumazet static inline bool sk_is_tcp(const struct sock *sk)
281742f67eeaSEric Dumazet {
2818ef8ad307SEric Dumazet 	return sk_is_inet(sk) &&
2819ef8ad307SEric Dumazet 	       sk->sk_type == SOCK_STREAM &&
2820ef8ad307SEric Dumazet 	       sk->sk_protocol == IPPROTO_TCP;
2821ef8ad307SEric Dumazet }
2822ef8ad307SEric Dumazet 
sk_is_udp(const struct sock * sk)2823ef8ad307SEric Dumazet static inline bool sk_is_udp(const struct sock *sk)
2824ef8ad307SEric Dumazet {
2825ef8ad307SEric Dumazet 	return sk_is_inet(sk) &&
2826ef8ad307SEric Dumazet 	       sk->sk_type == SOCK_DGRAM &&
2827ef8ad307SEric Dumazet 	       sk->sk_protocol == IPPROTO_UDP;
282842f67eeaSEric Dumazet }
282942f67eeaSEric Dumazet 
sk_is_stream_unix(const struct sock * sk)2830bcc5b2d8SJohn Fastabend static inline bool sk_is_stream_unix(const struct sock *sk)
2831bcc5b2d8SJohn Fastabend {
2832bcc5b2d8SJohn Fastabend 	return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
2833bcc5b2d8SJohn Fastabend }
2834bcc5b2d8SJohn Fastabend 
sk_is_vsock(const struct sock * sk)283512c3e619SMichal Luczaj static inline bool sk_is_vsock(const struct sock *sk)
283612c3e619SMichal Luczaj {
283712c3e619SMichal Luczaj 	return sk->sk_family == AF_VSOCK;
283812c3e619SMichal Luczaj }
283912c3e619SMichal Luczaj 
284020d49473SPatrick Ohly /**
28411da177e4SLinus Torvalds  * sk_eat_skb - Release a skb if it is no longer needed
28424dc3b16bSPavel Pisa  * @sk: socket to eat this skb from
28434dc3b16bSPavel Pisa  * @skb: socket buffer to eat
28441da177e4SLinus Torvalds  *
28451da177e4SLinus Torvalds  * This routine must be called with interrupts disabled or with the socket
28461da177e4SLinus Torvalds  * locked so that the sk_buff queue operation is ok.
28471da177e4SLinus Torvalds */
sk_eat_skb(struct sock * sk,struct sk_buff * skb)28487bced397SDan Williams static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
28491da177e4SLinus Torvalds {
28501da177e4SLinus Torvalds 	__skb_unlink(skb, &sk->sk_receive_queue);
28511da177e4SLinus Torvalds 	__kfree_skb(skb);
28521da177e4SLinus Torvalds }
28531da177e4SLinus Torvalds 
2854cf7fbe66SJoe Stringer static inline bool
skb_sk_is_prefetched(struct sk_buff * skb)2855cf7fbe66SJoe Stringer skb_sk_is_prefetched(struct sk_buff *skb)
2856cf7fbe66SJoe Stringer {
2857cf7fbe66SJoe Stringer #ifdef CONFIG_INET
2858cf7fbe66SJoe Stringer 	return skb->destructor == sock_pfree;
2859cf7fbe66SJoe Stringer #else
2860cf7fbe66SJoe Stringer 	return false;
2861cf7fbe66SJoe Stringer #endif /* CONFIG_INET */
2862cf7fbe66SJoe Stringer }
2863cf7fbe66SJoe Stringer 
28647ae215d2SJoe Stringer /* This helper checks if a socket is a full socket,
28657ae215d2SJoe Stringer  * ie _not_ a timewait or request socket.
28667ae215d2SJoe Stringer  */
sk_fullsock(const struct sock * sk)28677ae215d2SJoe Stringer static inline bool sk_fullsock(const struct sock *sk)
28687ae215d2SJoe Stringer {
28697ae215d2SJoe Stringer 	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
28707ae215d2SJoe Stringer }
28717ae215d2SJoe Stringer 
28727ae215d2SJoe Stringer static inline bool
sk_is_refcounted(struct sock * sk)28737ae215d2SJoe Stringer sk_is_refcounted(struct sock *sk)
28747ae215d2SJoe Stringer {
28757ae215d2SJoe Stringer 	/* Only full sockets have sk->sk_flags. */
28767ae215d2SJoe Stringer 	return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
28777ae215d2SJoe Stringer }
28787ae215d2SJoe Stringer 
287971489e21SJoe Stringer /**
2880045065f0SLothar Rubusch  * skb_steal_sock - steal a socket from an sk_buff
2881045065f0SLothar Rubusch  * @skb: sk_buff to steal the socket from
2882045065f0SLothar Rubusch  * @refcounted: is set to true if the socket is reference-counted
28839c02bec9SLorenz Bauer  * @prefetched: is set to true if the socket was assigned from bpf
288471489e21SJoe Stringer  */
288571489e21SJoe Stringer static inline struct sock *
skb_steal_sock(struct sk_buff * skb,bool * refcounted,bool * prefetched)28869c02bec9SLorenz Bauer skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
288723542618SKOVACS Krisztian {
2888efc27f8cSVijay Subramanian 	if (skb->sk) {
288923542618SKOVACS Krisztian 		struct sock *sk = skb->sk;
289023542618SKOVACS Krisztian 
289171489e21SJoe Stringer 		*refcounted = true;
28929c02bec9SLorenz Bauer 		*prefetched = skb_sk_is_prefetched(skb);
28939c02bec9SLorenz Bauer 		if (*prefetched)
28947ae215d2SJoe Stringer 			*refcounted = sk_is_refcounted(sk);
289523542618SKOVACS Krisztian 		skb->destructor = NULL;
289623542618SKOVACS Krisztian 		skb->sk = NULL;
289723542618SKOVACS Krisztian 		return sk;
289823542618SKOVACS Krisztian 	}
28999c02bec9SLorenz Bauer 	*prefetched = false;
290071489e21SJoe Stringer 	*refcounted = false;
290123542618SKOVACS Krisztian 	return NULL;
290223542618SKOVACS Krisztian }
290323542618SKOVACS Krisztian 
2904ebf4e808SIlya Lesokhin /* Checks if this SKB belongs to an HW offloaded socket
2905ebf4e808SIlya Lesokhin  * and whether any SW fallbacks are required based on dev.
290641477662SJakub Kicinski  * Check decrypted mark in case skb_orphan() cleared socket.
2907ebf4e808SIlya Lesokhin  */
sk_validate_xmit_skb(struct sk_buff * skb,struct net_device * dev)2908ebf4e808SIlya Lesokhin static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2909ebf4e808SIlya Lesokhin 						   struct net_device *dev)
2910ebf4e808SIlya Lesokhin {
2911ebf4e808SIlya Lesokhin #ifdef CONFIG_SOCK_VALIDATE_XMIT
2912ebf4e808SIlya Lesokhin 	struct sock *sk = skb->sk;
2913ebf4e808SIlya Lesokhin 
291441477662SJakub Kicinski 	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2915ebf4e808SIlya Lesokhin 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
291641477662SJakub Kicinski #ifdef CONFIG_TLS_DEVICE
291741477662SJakub Kicinski 	} else if (unlikely(skb->decrypted)) {
291841477662SJakub Kicinski 		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
291941477662SJakub Kicinski 		kfree_skb(skb);
292041477662SJakub Kicinski 		skb = NULL;
292141477662SJakub Kicinski #endif
292241477662SJakub Kicinski 	}
2923ebf4e808SIlya Lesokhin #endif
2924ebf4e808SIlya Lesokhin 
2925ebf4e808SIlya Lesokhin 	return skb;
2926ebf4e808SIlya Lesokhin }
2927ebf4e808SIlya Lesokhin 
2928e446f9dfSEric Dumazet /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
2929e446f9dfSEric Dumazet  * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
2930e446f9dfSEric Dumazet  */
sk_listener(const struct sock * sk)2931e446f9dfSEric Dumazet static inline bool sk_listener(const struct sock *sk)
2932e446f9dfSEric Dumazet {
2933e446f9dfSEric Dumazet 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2934e446f9dfSEric Dumazet }
2935e446f9dfSEric Dumazet 
2936193d357dSAlexey Dobriyan void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
293769336bd2SJoe Perches int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
293869336bd2SJoe Perches 		       int type);
29391da177e4SLinus Torvalds 
2940a3b299daSEric W. Biederman bool sk_ns_capable(const struct sock *sk,
2941a3b299daSEric W. Biederman 		   struct user_namespace *user_ns, int cap);
2942a3b299daSEric W. Biederman bool sk_capable(const struct sock *sk, int cap);
2943a3b299daSEric W. Biederman bool sk_net_capable(const struct sock *sk, int cap);
2944a3b299daSEric W. Biederman 
2945a2d133b1SJosh Hunt void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2946a2d133b1SJosh Hunt 
2947eaa72dc4SEric Dumazet /* Take into consideration the size of the struct sk_buff overhead in the
2948eaa72dc4SEric Dumazet  * determination of these values, since that is non-constant across
2949eaa72dc4SEric Dumazet  * platforms.  This makes socket queueing behavior and performance
2950eaa72dc4SEric Dumazet  * not depend upon such differences.
2951eaa72dc4SEric Dumazet  */
2952eaa72dc4SEric Dumazet #define _SK_MEM_PACKETS		256
2953eaa72dc4SEric Dumazet #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
2954eaa72dc4SEric Dumazet #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2955eaa72dc4SEric Dumazet #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2956eaa72dc4SEric Dumazet 
29571da177e4SLinus Torvalds extern __u32 sysctl_wmem_max;
29581da177e4SLinus Torvalds extern __u32 sysctl_rmem_max;
29591da177e4SLinus Torvalds 
2960b245be1fSWillem de Bruijn extern int sysctl_tstamp_allow_data;
29616baf1f41SDavid S. Miller extern int sysctl_optmem_max;
29626baf1f41SDavid S. Miller 
296320380731SArnaldo Carvalho de Melo extern __u32 sysctl_wmem_default;
296420380731SArnaldo Carvalho de Melo extern __u32 sysctl_rmem_default;
296520380731SArnaldo Carvalho de Melo 
2966723783d0SYunsheng Lin #define SKB_FRAG_PAGE_ORDER	get_order(32768)
2967ce27ec60SEric Dumazet DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2968ce27ec60SEric Dumazet 
sk_get_wmem0(const struct sock * sk,const struct proto * proto)2969a3dcaf17SEric Dumazet static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2970a3dcaf17SEric Dumazet {
2971a3dcaf17SEric Dumazet 	/* Does this proto have per netns sysctl_wmem ? */
2972a3dcaf17SEric Dumazet 	if (proto->sysctl_wmem_offset)
297302739545SKuniyuki Iwashima 		return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
2974a3dcaf17SEric Dumazet 
297502739545SKuniyuki Iwashima 	return READ_ONCE(*proto->sysctl_wmem);
2976a3dcaf17SEric Dumazet }
2977a3dcaf17SEric Dumazet 
sk_get_rmem0(const struct sock * sk,const struct proto * proto)2978a3dcaf17SEric Dumazet static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2979a3dcaf17SEric Dumazet {
2980a3dcaf17SEric Dumazet 	/* Does this proto have per netns sysctl_rmem ? */
2981a3dcaf17SEric Dumazet 	if (proto->sysctl_rmem_offset)
298202739545SKuniyuki Iwashima 		return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
2983a3dcaf17SEric Dumazet 
298402739545SKuniyuki Iwashima 	return READ_ONCE(*proto->sysctl_rmem);
2985a3dcaf17SEric Dumazet }
2986a3dcaf17SEric Dumazet 
2987c9f1f58dSEric Dumazet /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
2988c9f1f58dSEric Dumazet  * Some wifi drivers need to tweak it to get more chunks.
2989c9f1f58dSEric Dumazet  * They can use this helper from their ndo_start_xmit()
2990c9f1f58dSEric Dumazet  */
sk_pacing_shift_update(struct sock * sk,int val)2991c9f1f58dSEric Dumazet static inline void sk_pacing_shift_update(struct sock *sk, int val)
2992c9f1f58dSEric Dumazet {
29937c68fa2bSEric Dumazet 	if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2994c9f1f58dSEric Dumazet 		return;
29957c68fa2bSEric Dumazet 	WRITE_ONCE(sk->sk_pacing_shift, val);
2996c9f1f58dSEric Dumazet }
2997c9f1f58dSEric Dumazet 
299854dc3e33SDavid Ahern /* if a socket is bound to a device, check that the given device
299954dc3e33SDavid Ahern  * index is either the same or that the socket is bound to an L3
300054dc3e33SDavid Ahern  * master device and the given device index is also enslaved to
300154dc3e33SDavid Ahern  * that L3 master
300254dc3e33SDavid Ahern  */
sk_dev_equal_l3scope(struct sock * sk,int dif)300354dc3e33SDavid Ahern static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
300454dc3e33SDavid Ahern {
30054c971d2fSEric Dumazet 	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
300654dc3e33SDavid Ahern 	int mdif;
300754dc3e33SDavid Ahern 
30084c971d2fSEric Dumazet 	if (!bound_dev_if || bound_dev_if == dif)
300954dc3e33SDavid Ahern 		return true;
301054dc3e33SDavid Ahern 
301154dc3e33SDavid Ahern 	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
30124c971d2fSEric Dumazet 	if (mdif && mdif == bound_dev_if)
301354dc3e33SDavid Ahern 		return true;
301454dc3e33SDavid Ahern 
301554dc3e33SDavid Ahern 	return false;
301654dc3e33SDavid Ahern }
301754dc3e33SDavid Ahern 
301843a825afSBjörn Töpel void sock_def_readable(struct sock *sk);
301943a825afSBjörn Töpel 
30208ea204c2SFerenc Fejes int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
3021371087aaSFlorian Westphal void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
3022d463126eSYangbo Lu int sock_set_timestamping(struct sock *sk, int optname,
3023d463126eSYangbo Lu 			  struct so_timestamping timestamping);
3024ced122d9SFlorian Westphal 
3025783da70eSChristoph Hellwig void sock_enable_timestamps(struct sock *sk);
3026c433594cSChristoph Hellwig void sock_no_linger(struct sock *sk);
3027ce3d9544SChristoph Hellwig void sock_set_keepalive(struct sock *sk);
30286e434967SChristoph Hellwig void sock_set_priority(struct sock *sk, u32 priority);
302926cfabf9SChristoph Hellwig void sock_set_rcvbuf(struct sock *sk, int val);
303084d1c617SAlexander Aring void sock_set_mark(struct sock *sk, u32 val);
3031b58f0e8fSChristoph Hellwig void sock_set_reuseaddr(struct sock *sk);
3032fe31a326SChristoph Hellwig void sock_set_reuseport(struct sock *sk);
303376ee0785SChristoph Hellwig void sock_set_sndtimeo(struct sock *sk, s64 secs);
3034b58f0e8fSChristoph Hellwig 
3035c0425a42SChristoph Hellwig int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
3036c0425a42SChristoph Hellwig 
30374c1e34c0SRichard Palethorpe int sock_get_timeout(long timeo, void *optval, bool old_timeval);
30384c1e34c0SRichard Palethorpe int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
30394c1e34c0SRichard Palethorpe 			   sockptr_t optval, int optlen, bool old_timeval);
30404c1e34c0SRichard Palethorpe 
3041e1d001faSBreno Leitao int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
3042e1d001faSBreno Leitao 		     void __user *arg, void *karg, size_t size);
3043e1d001faSBreno Leitao int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
sk_is_readable(struct sock * sk)30447b50ecfcSCong Wang static inline bool sk_is_readable(struct sock *sk)
30457b50ecfcSCong Wang {
30467b50ecfcSCong Wang 	if (sk->sk_prot->sock_is_readable)
30477b50ecfcSCong Wang 		return sk->sk_prot->sock_is_readable(sk);
30487b50ecfcSCong Wang 	return false;
30497b50ecfcSCong Wang }
30501da177e4SLinus Torvalds #endif	/* _SOCK_H */
3051