sock.h (448cc2fb3a7b327823a9afd374808c37b8e6194f, old) vs. sock.h (1e84dc6b7bbfc4d1dd846decece4611b7e035772, new)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the AF_INET socket handler.
8 *

--- 49 unchanged lines hidden ---

58#include <linux/rbtree.h>
59#include <linux/filter.h>
60#include <linux/rculist_nulls.h>
61#include <linux/poll.h>
62#include <linux/sockptr.h>
63#include <linux/indirect_call_wrapper.h>
64#include <linux/atomic.h>
65#include <linux/refcount.h>
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the AF_INET socket handler.
8 *

--- 49 unchanged lines hidden ---

58#include <linux/rbtree.h>
59#include <linux/filter.h>
60#include <linux/rculist_nulls.h>
61#include <linux/poll.h>
62#include <linux/sockptr.h>
63#include <linux/indirect_call_wrapper.h>
64#include <linux/atomic.h>
65#include <linux/refcount.h>
66#include <linux/llist.h>
66#include <net/dst.h>
67#include <net/checksum.h>
68#include <net/tcp_states.h>
69#include <linux/net_tstamp.h>
70#include <net/l3mdev.h>
71#include <uapi/linux/socket.h>
72
73/*

--- 205 unchanged lines hidden ---

279 * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
280 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
281 * @sk_sndbuf: size of send buffer in bytes
282 * @__sk_flags_offset: empty field used to determine location of bitfield
283 * @sk_padding: unused element for alignment
284 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
285 * @sk_no_check_rx: allow zero checksum in RX packets
286 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
67#include <net/dst.h>
68#include <net/checksum.h>
69#include <net/tcp_states.h>
70#include <linux/net_tstamp.h>
71#include <net/l3mdev.h>
72#include <uapi/linux/socket.h>
73
74/*

--- 205 unchanged lines hidden ---

280 * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
281 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
282 * @sk_sndbuf: size of send buffer in bytes
283 * @__sk_flags_offset: empty field used to determine location of bitfield
284 * @sk_padding: unused element for alignment
285 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
286 * @sk_no_check_rx: allow zero checksum in RX packets
287 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
287 * @sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
288 * @sk_route_forced_caps: static, forced route capabilities
289 * (set in tcp_init_sock())
288 * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
290 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
291 * @sk_gso_max_size: Maximum GSO segment size to build
292 * @sk_gso_max_segs: Maximum number of GSO segments
293 * @sk_pacing_shift: scaling factor for TCP Small Queues
294 * @sk_lingertime: %SO_LINGER l_linger setting
295 * @sk_backlog: always used with the per-socket spinlock held
289 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
290 * @sk_gso_max_size: Maximum GSO segment size to build
291 * @sk_gso_max_segs: Maximum number of GSO segments
292 * @sk_pacing_shift: scaling factor for TCP Small Queues
293 * @sk_lingertime: %SO_LINGER l_linger setting
294 * @sk_backlog: always used with the per-socket spinlock held
295 * @defer_list: head of llist storing skbs to be freed
296 * @sk_callback_lock: used with the callbacks in the end of this struct
297 * @sk_error_queue: rarely used
298 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
299 * IPV6_ADDRFORM for instance)
300 * @sk_err: last error
301 * @sk_err_soft: errors that don't cause failure but are the cause of a
302 * persistent failure not just 'timed out'
303 * @sk_drops: raw/udp drops counter

--- 82 unchanged lines hidden ---

386#define sk_net __sk_common.skc_net
387#define sk_v6_daddr __sk_common.skc_v6_daddr
388#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
389#define sk_cookie __sk_common.skc_cookie
390#define sk_incoming_cpu __sk_common.skc_incoming_cpu
391#define sk_flags __sk_common.skc_flags
392#define sk_rxhash __sk_common.skc_rxhash
393
296 * @sk_callback_lock: used with the callbacks in the end of this struct
297 * @sk_error_queue: rarely used
298 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
299 * IPV6_ADDRFORM for instance)
300 * @sk_err: last error
301 * @sk_err_soft: errors that don't cause failure but are the cause of a
302 * persistent failure not just 'timed out'
303 * @sk_drops: raw/udp drops counter

--- 82 unchanged lines hidden ---

386#define sk_net __sk_common.skc_net
387#define sk_v6_daddr __sk_common.skc_v6_daddr
388#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
389#define sk_cookie __sk_common.skc_cookie
390#define sk_incoming_cpu __sk_common.skc_incoming_cpu
391#define sk_flags __sk_common.skc_flags
392#define sk_rxhash __sk_common.skc_rxhash
393
394 /* early demux fields */
395 struct dst_entry *sk_rx_dst;
396 int sk_rx_dst_ifindex;
397 u32 sk_rx_dst_cookie;
398
394 socket_lock_t sk_lock;
395 atomic_t sk_drops;
396 int sk_rcvlowat;
397 struct sk_buff_head sk_error_queue;
398 struct sk_buff_head sk_receive_queue;
399 /*
400 * The backlog queue is special, it is always used with
401 * the per-socket spinlock held and requires low latency
402 * access. Therefore we special case its implementation.
403 * Note : rmem_alloc is in this structure to fill a hole
404 * on 64bit arches, not because it's logically part of
405 * backlog.
406 */
407 struct {
408 atomic_t rmem_alloc;
409 int len;
410 struct sk_buff *head;
411 struct sk_buff *tail;
412 } sk_backlog;
399 socket_lock_t sk_lock;
400 atomic_t sk_drops;
401 int sk_rcvlowat;
402 struct sk_buff_head sk_error_queue;
403 struct sk_buff_head sk_receive_queue;
404 /*
405 * The backlog queue is special, it is always used with
406 * the per-socket spinlock held and requires low latency
407 * access. Therefore we special case its implementation.
408 * Note : rmem_alloc is in this structure to fill a hole
409 * on 64bit arches, not because it's logically part of
410 * backlog.
411 */
412 struct {
413 atomic_t rmem_alloc;
414 int len;
415 struct sk_buff *head;
416 struct sk_buff *tail;
417 } sk_backlog;
418 struct llist_head defer_list;
419
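
	[Editor's note] The new defer_list pairs with the <linux/llist.h> include added above: it is a lock-free list on which the receive path can park skbs whose freeing is deferred, to be drained later by the socket owner. A minimal drain sketch, assuming the skb embeds an llist_node (called ll_node below; the member and function names are illustrative, not taken from this diff):

	static void example_defer_free_flush(struct sock *sk)
	{
		struct sk_buff *skb, *next;

		if (llist_empty(&sk->defer_list))
			return;

		/* Atomically detach the whole list, then free every entry. */
		llist_for_each_entry_safe(skb, next,
					  llist_del_all(&sk->defer_list), ll_node)
			kfree_skb(skb);
	}
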
413#define sk_rmem_alloc sk_backlog.rmem_alloc
414
415 int sk_forward_alloc;
416 u32 sk_reserved_mem;
417#ifdef CONFIG_NET_RX_BUSY_POLL
418 unsigned int sk_ll_usec;
419 /* ===== mostly read cache line ===== */
420 unsigned int sk_napi_id;

--- 5 unchanged lines hidden ---

426 struct socket_wq __rcu *sk_wq;
427 /* private: */
428 struct socket_wq *sk_wq_raw;
429 /* public: */
430 };
431#ifdef CONFIG_XFRM
432 struct xfrm_policy __rcu *sk_policy[2];
433#endif
420#define sk_rmem_alloc sk_backlog.rmem_alloc
421
422 int sk_forward_alloc;
423 u32 sk_reserved_mem;
424#ifdef CONFIG_NET_RX_BUSY_POLL
425 unsigned int sk_ll_usec;
426 /* ===== mostly read cache line ===== */
427 unsigned int sk_napi_id;

--- 5 unchanged lines hidden ---

433 struct socket_wq __rcu *sk_wq;
434 /* private: */
435 struct socket_wq *sk_wq_raw;
436 /* public: */
437 };
438#ifdef CONFIG_XFRM
439 struct xfrm_policy __rcu *sk_policy[2];
440#endif
434 struct dst_entry *sk_rx_dst;
435 int sk_rx_dst_ifindex;
436 u32 sk_rx_dst_cookie;
437
438 struct dst_entry __rcu *sk_dst_cache;
439 atomic_t sk_omem_alloc;
440 int sk_sndbuf;
441
442 /* ===== cache line for TX ===== */
443 int sk_wmem_queued;
444 refcount_t sk_wmem_alloc;

--- 10 unchanged lines hidden ---

455 long sk_sndtimeo;
456 struct timer_list sk_timer;
457 __u32 sk_priority;
458 __u32 sk_mark;
459 unsigned long sk_pacing_rate; /* bytes per second */
460 unsigned long sk_max_pacing_rate;
461 struct page_frag sk_frag;
462 netdev_features_t sk_route_caps;
441
442 struct dst_entry __rcu *sk_dst_cache;
443 atomic_t sk_omem_alloc;
444 int sk_sndbuf;
445
446 /* ===== cache line for TX ===== */
447 int sk_wmem_queued;
448 refcount_t sk_wmem_alloc;

--- 10 unchanged lines hidden ---

459 long sk_sndtimeo;
460 struct timer_list sk_timer;
461 __u32 sk_priority;
462 __u32 sk_mark;
463 unsigned long sk_pacing_rate; /* bytes per second */
464 unsigned long sk_max_pacing_rate;
465 struct page_frag sk_frag;
466 netdev_features_t sk_route_caps;
463 netdev_features_t sk_route_nocaps;
464 netdev_features_t sk_route_forced_caps;
465 int sk_gso_type;
466 unsigned int sk_gso_max_size;
467 gfp_t sk_allocation;
468 __u32 sk_txhash;
469
470 /*
471 * Because of non atomicity rules, all
472 * changes are protected by socket lock.
473 */
467 int sk_gso_type;
468 unsigned int sk_gso_max_size;
469 gfp_t sk_allocation;
470 __u32 sk_txhash;
471
472 /*
473 * Because of non atomicity rules, all
474 * changes are protected by socket lock.
475 */
474 u8 sk_padding : 1,
476 u8 sk_gso_disabled : 1,
475 sk_kern_sock : 1,
476 sk_no_check_tx : 1,
477 sk_no_check_rx : 1,
478 sk_userlocks : 4;
479 u8 sk_pacing_shift;
480 u16 sk_type;
481 u16 sk_protocol;
482 u16 sk_gso_max_segs;

--- 5 unchanged lines hidden (view full) ---

488 u32 sk_ack_backlog;
489 u32 sk_max_ack_backlog;
490 kuid_t sk_uid;
491#ifdef CONFIG_NET_RX_BUSY_POLL
492 u8 sk_prefer_busy_poll;
493 u16 sk_busy_poll_budget;
494#endif
495 spinlock_t sk_peer_lock;
477 sk_kern_sock : 1,
478 sk_no_check_tx : 1,
479 sk_no_check_rx : 1,
480 sk_userlocks : 4;
481 u8 sk_pacing_shift;
482 u16 sk_type;
483 u16 sk_protocol;
484 u16 sk_gso_max_segs;

--- 5 unchanged lines hidden (view full) ---

490 u32 sk_ack_backlog;
491 u32 sk_max_ack_backlog;
492 kuid_t sk_uid;
493#ifdef CONFIG_NET_RX_BUSY_POLL
494 u8 sk_prefer_busy_poll;
495 u16 sk_busy_poll_budget;
496#endif
497 spinlock_t sk_peer_lock;
498 int sk_bind_phc;
496 struct pid *sk_peer_pid;
497 const struct cred *sk_peer_cred;
498
499 long sk_rcvtimeo;
500 ktime_t sk_stamp;
501#if BITS_PER_LONG==32
502 seqlock_t sk_stamp_seq;
503#endif
504 u16 sk_tsflags;
499 struct pid *sk_peer_pid;
500 const struct cred *sk_peer_cred;
501
502 long sk_rcvtimeo;
503 ktime_t sk_stamp;
504#if BITS_PER_LONG==32
505 seqlock_t sk_stamp_seq;
506#endif
507 u16 sk_tsflags;
505 int sk_bind_phc;
506 u8 sk_shutdown;
507 u32 sk_tskey;
508 atomic_t sk_zckey;
509
510 u8 sk_clockid;
511 u8 sk_txtime_deadline_mode : 1,
512 sk_txtime_report_errors : 1,
513 sk_txtime_unused : 6;

--- 503 unchanged lines hidden ---

1017
1018 __sk_add_backlog(sk, skb);
1019 sk->sk_backlog.len += skb->truesize;
1020 return 0;
1021}
1022
1023int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1024
508 u8 sk_shutdown;
509 u32 sk_tskey;
510 atomic_t sk_zckey;
511
512 u8 sk_clockid;
513 u8 sk_txtime_deadline_mode : 1,
514 sk_txtime_report_errors : 1,
515 sk_txtime_unused : 6;

--- 503 unchanged lines hidden ---

1019
1020 __sk_add_backlog(sk, skb);
1021 sk->sk_backlog.len += skb->truesize;
1022 return 0;
1023}
1024
1025int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1026
1027INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
1028INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
1029
1025static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1026{
1027 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
1028 return __sk_backlog_rcv(sk, skb);
1029
1030static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1031{
1032 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
1033 return __sk_backlog_rcv(sk, skb);
1034
1030 return sk->sk_backlog_rcv(sk, skb);
1035 return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
1036 tcp_v6_do_rcv,
1037 tcp_v4_do_rcv,
1038 sk, skb);
1031}
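
	[Editor's note] The INDIRECT_CALLABLE_DECLARE()/INDIRECT_CALL_INET() pair above lets the compiler test for the two most common handlers and call them directly, paying the retpoline-era indirect-call cost only for other protocols. Roughly, a simplified sketch of what the macro expands to when IPv6 is built in (not the exact macro body):

	if (sk->sk_backlog_rcv == tcp_v6_do_rcv)
		return tcp_v6_do_rcv(sk, skb);
	if (sk->sk_backlog_rcv == tcp_v4_do_rcv)
		return tcp_v4_do_rcv(sk, skb);
	return sk->sk_backlog_rcv(sk, skb);	/* any other protocol */
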
1032
1033static inline void sk_incoming_cpu_update(struct sock *sk)
1034{
1035 int cpu = raw_smp_processor_id();
1036
1037 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1038 WRITE_ONCE(sk->sk_incoming_cpu, cpu);

--- 166 unchanged lines hidden ---

1205 bool restore);
1206#endif
1207
1208 /* Keeping track of sockets in use */
1209#ifdef CONFIG_PROC_FS
1210 unsigned int inuse_idx;
1211#endif
1212
1039}
1040
1041static inline void sk_incoming_cpu_update(struct sock *sk)
1042{
1043 int cpu = raw_smp_processor_id();
1044
1045 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1046 WRITE_ONCE(sk->sk_incoming_cpu, cpu);

--- 166 unchanged lines hidden ---

1213 bool restore);
1214#endif
1215
1216 /* Keeping track of sockets in use */
1217#ifdef CONFIG_PROC_FS
1218 unsigned int inuse_idx;
1219#endif
1220
1221#if IS_ENABLED(CONFIG_MPTCP)
1213 int (*forward_alloc_get)(const struct sock *sk);
1222 int (*forward_alloc_get)(const struct sock *sk);
1223#endif
1214
1215 bool (*stream_memory_free)(const struct sock *sk, int wake);
1216 bool (*sock_is_readable)(struct sock *sk);
1217 /* Memory pressure */
1218 void (*enter_memory_pressure)(struct sock *sk);
1219 void (*leave_memory_pressure)(struct sock *sk);
1220 atomic_long_t *memory_allocated; /* Current allocated memory. */
1221 struct percpu_counter *sockets_allocated; /* Current number of sockets. */

--- 72 unchanged lines hidden ---

1294#define sk_refcnt_debug_dec(sk) do { } while (0)
1295#define sk_refcnt_debug_release(sk) do { } while (0)
1296#endif /* SOCK_REFCNT_DEBUG */
1297
1298INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1299
1300static inline int sk_forward_alloc_get(const struct sock *sk)
1301{
1224
1225 bool (*stream_memory_free)(const struct sock *sk, int wake);
1226 bool (*sock_is_readable)(struct sock *sk);
1227 /* Memory pressure */
1228 void (*enter_memory_pressure)(struct sock *sk);
1229 void (*leave_memory_pressure)(struct sock *sk);
1230 atomic_long_t *memory_allocated; /* Current allocated memory. */
1231 struct percpu_counter *sockets_allocated; /* Current number of sockets. */

--- 72 unchanged lines hidden ---

1304#define sk_refcnt_debug_dec(sk) do { } while (0)
1305#define sk_refcnt_debug_release(sk) do { } while (0)
1306#endif /* SOCK_REFCNT_DEBUG */
1307
1308INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1309
1310static inline int sk_forward_alloc_get(const struct sock *sk)
1311{
1302 if (!sk->sk_prot->forward_alloc_get)
1303 return sk->sk_forward_alloc;
1304
1305 return sk->sk_prot->forward_alloc_get(sk);
1312#if IS_ENABLED(CONFIG_MPTCP)
1313 if (sk->sk_prot->forward_alloc_get)
1314 return sk->sk_prot->forward_alloc_get(sk);
1315#endif
1316 return sk->sk_forward_alloc;
1306}
1307
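
	[Editor's note] Only MPTCP supplies a forward_alloc_get callback, which is why both the struct proto member (above) and this accessor are now wrapped in IS_ENABLED(CONFIG_MPTCP): on non-MPTCP builds the compiler reduces sk_forward_alloc_get() to a plain read of sk->sk_forward_alloc. An illustrative sketch of the kind of hook MPTCP registers (the names below are assumptions, not taken from this diff):

	static int example_msk_forward_alloc_get(const struct sock *sk)
	{
		/* An MPTCP-style implementation would add the subflows'
		 * forward allocation to the parent socket's own. */
		return sk->sk_forward_alloc;
	}

	static struct proto example_mptcp_prot = {
		.name			= "MPTCP-example",
		.forward_alloc_get	= example_msk_forward_alloc_get,
	};
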
1308static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1309{
1310 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1311 return false;
1312
1313 return sk->sk_prot->stream_memory_free ?

--- 100 unchanged lines hidden ---

1414{
1415 if (!prot->memory_pressure)
1416 return false;
1417 return !!*prot->memory_pressure;
1418}
1419
1420
1421#ifdef CONFIG_PROC_FS
1317}
1318
1319static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1320{
1321 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1322 return false;
1323
1324 return sk->sk_prot->stream_memory_free ?

--- 100 unchanged lines hidden ---

1425{
1426 if (!prot->memory_pressure)
1427 return false;
1428 return !!*prot->memory_pressure;
1429}
1430
1431
1432#ifdef CONFIG_PROC_FS
1422/* Called with local bh disabled */
1423void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1433#define PROTO_INUSE_NR 64 /* should be enough for the first time */
1434struct prot_inuse {
1435 int all;
1436 int val[PROTO_INUSE_NR];
1437};
1438
1439static inline void sock_prot_inuse_add(const struct net *net,
1440 const struct proto *prot, int val)
1441{
1442 this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
1443}
1444
1445static inline void sock_inuse_add(const struct net *net, int val)
1446{
1447 this_cpu_add(net->core.prot_inuse->all, val);
1448}
1449
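
	[Editor's note] sock_prot_inuse_add() and the new sock_inuse_add() are now header inlines over plain per-cpu counters (net->core.prot_inuse->val[] and ->all) instead of exported functions. Callers keep the same shape; a hypothetical hash callback, mirroring how inet protocols account sockets in use:

	static void example_proto_hash(struct sock *sk)
	{
		local_bh_disable();
		/* ... insert sk into the protocol's lookup table ... */
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
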
1424int sock_prot_inuse_get(struct net *net, struct proto *proto);
1425int sock_inuse_get(struct net *net);
1426#else
1450int sock_prot_inuse_get(struct net *net, struct proto *proto);
1451int sock_inuse_get(struct net *net);
1452#else
1427static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1428 int inc)
1453static inline void sock_prot_inuse_add(const struct net *net,
1454 const struct proto *prot, int val)
1429{
1430}
1455{
1456}
1457
1458static inline void sock_inuse_add(const struct net *net, int val)
1459{
1460}
1431#endif
1432
1433
1434/* With per-bucket locks this operation is not-atomic, so that
1435 * this version is not worse.
1436 */
1437static inline int __sk_prot_rehash(struct sock *sk)
1438{

--- 661 unchanged lines hidden ---

2100 if (!READ_ONCE(sk->sk_dst_pending_confirm))
2101 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2102}
2103
2104static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
2105{
2106 if (skb_get_dst_pending_confirm(skb)) {
2107 struct sock *sk = skb->sk;
1461#endif
1462
1463
1464/* With per-bucket locks this operation is not-atomic, so that
1465 * this version is not worse.
1466 */
1467static inline int __sk_prot_rehash(struct sock *sk)
1468{

--- 661 unchanged lines hidden ---

2130 if (!READ_ONCE(sk->sk_dst_pending_confirm))
2131 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2132}
2133
2134static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
2135{
2136 if (skb_get_dst_pending_confirm(skb)) {
2137 struct sock *sk = skb->sk;
2108 unsigned long now = jiffies;
2109
2138
2110 /* avoid dirtying neighbour */
2111 if (READ_ONCE(n->confirmed) != now)
2112 WRITE_ONCE(n->confirmed, now);
2113 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2114 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2139 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2140 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2141 neigh_confirm(n);
2115 }
2116}
2117
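
	[Editor's note] The open-coded jiffies update is now delegated to neigh_confirm() from include/net/neighbour.h, which performs the same avoid-dirtying check in one shared place. A sketch of that helper's logic (paraphrased, not a verbatim copy):

	static inline void neigh_confirm(struct neighbour *n)
	{
		if (n) {
			unsigned long now = jiffies;

			/* Skip the store when nothing changes, so hot paths
			 * do not keep dirtying a shared cache line. */
			if (READ_ONCE(n->confirmed) != now)
				WRITE_ONCE(n->confirmed, now);
		}
	}
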
2118bool sk_mc_loop(struct sock *sk);
2119
2120static inline bool sk_can_gso(const struct sock *sk)
2121{
2122 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2123}
2124
2125void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2126
2142 }
2143}
2144
2145bool sk_mc_loop(struct sock *sk);
2146
2147static inline bool sk_can_gso(const struct sock *sk)
2148{
2149 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2150}
2151
2152void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2153
2127static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
2154static inline void sk_gso_disable(struct sock *sk)
2128{
2155{
2129 sk->sk_route_nocaps |= flags;
2130 sk->sk_route_caps &= ~flags;
2156 sk->sk_gso_disabled = 1;
2157 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2131}
2132
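
	[Editor's note] Per the kernel-doc above, sk_gso_disabled means "NETIF_F_GSO_MASK is forbidden", so the old per-socket sk_route_nocaps/sk_route_forced_caps masks collapse into a single bit and sk_nocaps_add() becomes sk_gso_disable(). A hedged sketch of a typical caller (the wrapper function is hypothetical; TCP MD5 signatures are one known user of this pattern):

	static void example_disable_gso(struct sock *sk)
	{
		/* Previously: sk_nocaps_add(sk, NETIF_F_GSO_MASK); */
		sk_gso_disable(sk);
	}
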
2133static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2134 struct iov_iter *from, char *to,
2135 int copy, int offset)
2136{
2137 if (skb->ip_summed == CHECKSUM_NONE) {
2138 __wsum csum = 0;

--- 494 unchanged lines hidden ---

2633}
2634
2635static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2636{
2637 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2638 &skb_shinfo(skb)->tskey);
2639}
2640
2158}
2159
2160static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2161 struct iov_iter *from, char *to,
2162 int copy, int offset)
2163{
2164 if (skb->ip_summed == CHECKSUM_NONE) {
2165 __wsum csum = 0;

--- 494 unchanged lines hidden ---

2660}
2661
2662static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2663{
2664 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2665 &skb_shinfo(skb)->tskey);
2666}
2667
2668static inline bool sk_is_tcp(const struct sock *sk)
2669{
2670 return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
2671}
2672
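
	[Editor's note] The new sk_is_tcp() helper simply names a test that several call sites previously open-coded. Illustrative (hypothetical) use:

	static bool example_take_tcp_fast_path(const struct sock *sk)
	{
		/* Replaces the open-coded check:
		 *   sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP
		 */
		return sk_is_tcp(sk);
	}
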
2641/**
2642 * sk_eat_skb - Release a skb if it is no longer needed
2643 * @sk: socket to eat this skb from
2644 * @skb: socket buffer to eat
2645 *
2646 * This routine must be called with interrupts disabled or with the socket
2647 * locked so that the sk_buff queue operation is ok.
2648*/

--- 208 unchanged lines hidden ---
2673/**
2674 * sk_eat_skb - Release a skb if it is no longer needed
2675 * @sk: socket to eat this skb from
2676 * @skb: socket buffer to eat
2677 *
2678 * This routine must be called with interrupts disabled or with the socket
2679 * locked so that the sk_buff queue operation is ok.
2680*/

--- 208 unchanged lines hidden ---