sock.h: diff between commits db6da59cf27b5661ced03754ae0550f8914eda9e (old) and e1d001fa5b477c4da46a29be1fcece91db7c7c6f (new)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *

--- 322 unchanged lines hidden ---

 *	@sk_peek_off: current peek_offset value
 *	@sk_send_head: front of stuff to transmit
 *	@tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_cgrp_data: cgroup data for this cgroup
 *	@sk_memcg: this socket's memory cgroup association
 *	@sk_write_pending: a write to stream socket waits to start
+ *	@sk_wait_pending: number of threads blocked on this socket
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is bf sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_validate_xmit_skb: ptr to an optional validate function
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 *	@sk_reuseport_cb: reuseport group container

--- 76 unchanged lines hidden ---

	int			sk_forward_alloc;
	u32			sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;
+	int			sk_wait_pending;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		/* private: */
		struct socket_wq	*sk_wq_raw;
		/* public: */
	};

--- 706 unchanged lines hidden ---

	 *
	 * TCP_ESTABLISHED does cover almost all states where RFS
	 * might be useful, and is cheaper [1] than testing :
	 *	IPv4: inet_sk(sk)->inet_daddr
	 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
	 *	OR	an additional socket flag
	 * [1] : sk_state and sk_prot are in the same cache line.
	 */
-	if (sk->sk_state == TCP_ESTABLISHED)
-		sock_rps_record_flow_hash(sk->sk_rxhash);
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		/* This READ_ONCE() is paired with the WRITE_ONCE()
+		 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+		 */
+		sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+	}
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->hash))
-		sk->sk_rxhash = skb->hash;
+	/* The following WRITE_ONCE() is paired with the READ_ONCE()
+	 * here, and another one in sock_rps_record_flow().
+	 */
+	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+		WRITE_ONCE(sk->sk_rxhash, skb->hash);
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
-	sk->sk_rxhash = 0;
+	/* Paired with READ_ONCE() in sock_rps_record_flow() */
+	WRITE_ONCE(sk->sk_rxhash, 0);
#endif
}
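
The three hunks above all apply the same lockless pattern: sk->sk_rxhash can be read and written concurrently by different threads, so the writers use WRITE_ONCE() and the readers use READ_ONCE() to keep each access untorn and to document the pairing. Below is a minimal, self-contained userspace sketch of that pairing; the READ_ONCE()/WRITE_ONCE() macros here are simplified stand-ins for the kernel's, and rxhash stands in for sk->sk_rxhash.

/* Illustrative userspace sketch, not part of sock.h. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))

static uint32_t rxhash;			/* stands in for sk->sk_rxhash */

static void *writer(void *arg)		/* stands in for sock_rps_save_rxhash() */
{
	(void)arg;
	for (uint32_t h = 1; h <= 1000000; h++)
		if (READ_ONCE(rxhash) != h)	/* only dirty the line on change */
			WRITE_ONCE(rxhash, h);
	return NULL;
}

static void *reader(void *arg)		/* stands in for sock_rps_record_flow() */
{
	uint64_t sum = 0;

	(void)arg;
	for (int i = 0; i < 1000000; i++)
		sum += READ_ONCE(rxhash);	/* paired with WRITE_ONCE() above */
	printf("sum=%llu\n", (unsigned long long)sum);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
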

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
+		__sk->sk_wait_pending++;				\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
+		__sk->sk_wait_pending--;				\
		__rc = __condition;					\
		__rc;							\
	})
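
For context, a hedged sketch of the usual calling pattern around sk_wait_event(), modelled on the sk_stream_wait_*() helpers declared just below; the function name and the wake-up condition are illustrative, not taken from this header. The caller must hold the socket lock, which the macro drops and re-takes around the sleep, and the new sk_wait_pending counter is bumped for the duration of the wait so other code can see that threads are blocked on this socket.

/* Illustrative only: a typical sk_wait_event() caller. The wait entry uses
 * woken_wake_function so that wait_woken() inside the macro sleeps until the
 * socket's wait queue is woken (e.g. via ->sk_data_ready()).
 */
static int example_wait_for_data(struct sock *sk, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int done;

	add_wait_queue(sk_sleep(sk), &wait);
	/* Drops and re-takes the socket lock; returns the condition's
	 * final truth value.
	 */
	done = sk_wait_event(sk, &timeo,
			     !skb_queue_empty(&sk->sk_receive_queue), &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	return done ? 0 : -EAGAIN;
}
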

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);

--- 47 unchanged lines hidden ---

					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
					  bool kern);

	int			(*ioctl)(struct sock *sk, int cmd,
-					 unsigned long arg);
+					 int *karg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, sockptr_t optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,

--- 4 unchanged lines hidden ---

					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					   size_t len, int flags, int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
+	void			(*splice_eof)(struct socket *sock);
	int			(*bind)(struct sock *sk,
					struct sockaddr *addr, int addr_len);
	int			(*bind_add)(struct sock *sk,
					struct sockaddr *addr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);
	bool			(*bpf_bypass_getsockopt)(int level,

--- 1678 unchanged lines hidden ---

void sock_set_sndtimeo(struct sock *sk, s64 secs);

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);

int sock_get_timeout(long timeo, void *optval, bool old_timeval);
int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
			   sockptr_t optval, int optlen, bool old_timeval);

+int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
+		     void __user *arg, void *karg, size_t size);
+int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
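
These two added declarations pair with the ->ioctl signature change earlier in the diff (unsigned long arg becoming int *karg): the copy between the user pointer and kernel memory is now done by the generic socket code, and protocol handlers only read or write the kernel-side value. A hedged sketch of what a handler looks like under the new convention; the protocol name foo and the use of sk_rmem_alloc_get() to answer SIOCINQ are illustrative assumptions, not taken from this header.

/* Illustrative only: a protocol ->ioctl under the int *karg convention.
 * foo is a hypothetical protocol; copying to and from the original user
 * pointer is assumed to happen in the sk_ioctl()/sock_ioctl_inout() path
 * declared above, not in the handler itself.
 */
static int foo_ioctl(struct sock *sk, int cmd, int *karg)
{
	switch (cmd) {
	case SIOCINQ:
		*karg = sk_rmem_alloc_get(sk);	/* bytes queued for reading */
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}

Userspace is unaffected by this convention: it still passes an int by pointer to ioctl(); only the in-kernel plumbing that fills it in changes.
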
static inline bool sk_is_readable(struct sock *sk)
{
	if (sk->sk_prot->sock_is_readable)
		return sk->sk_prot->sock_is_readable(sk);
	return false;
}
#endif	/* _SOCK_H */