xref: /openbmc/linux/include/net/sock.h (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Definitions for the AF_INET socket handler.
7  *
8  * Version:	@(#)sock.h	1.0.4	05/13/93
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
13  *		Florian La Roche <flla@stud.uni-sb.de>
14  *
15  * Fixes:
16  *		Alan Cox	:	Volatiles in skbuff pointers. See
17  *					skbuff comments. May be overdone,
18  *					better to prove they can be removed
19  *					than the reverse.
20  *		Alan Cox	:	Added a zapped field for tcp to note
21  *					a socket is reset and must stay shut up
22  *		Alan Cox	:	New fields for options
23  *	Pauline Middelink	:	identd support
24  *		Alan Cox	:	Eliminate low level recv/recvfrom
25  *		David S. Miller	:	New socket lookup architecture.
26  *              Steve Whitehouse:       Default routines for sock_ops
27  *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
28  *              			protinfo be just a void pointer, as the
29  *              			protocol specific parts were moved to
30  *              			respective headers and ipv4/v6, etc now
31  *              			use private slabcaches for its socks
32  *              Pedro Hortas	:	New flags field for socket options
33  *
34  *
35  *		This program is free software; you can redistribute it and/or
36  *		modify it under the terms of the GNU General Public License
37  *		as published by the Free Software Foundation; either version
38  *		2 of the License, or (at your option) any later version.
39  */
40 #ifndef _SOCK_H
41 #define _SOCK_H
42 
43 #include <linux/list.h>
44 #include <linux/timer.h>
45 #include <linux/cache.h>
46 #include <linux/module.h>
47 #include <linux/lockdep.h>
48 #include <linux/netdevice.h>
49 #include <linux/skbuff.h>	/* struct sk_buff */
50 #include <linux/mm.h>
51 #include <linux/security.h>
52 
53 #include <linux/filter.h>
54 
55 #include <asm/atomic.h>
56 #include <net/dst.h>
57 #include <net/checksum.h>
58 
59 /*
60  * This structure really needs to be cleaned up.
61  * Most of it is for TCP, and not used by any of
62  * the other protocols.
63  */
64 
65 /* Define this to get the SOCK_DBG debugging facility. */
66 #define SOCK_DEBUGGING
67 #ifdef SOCK_DEBUGGING
68 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
69 					printk(KERN_DEBUG msg); } while (0)
70 #else
71 #define SOCK_DEBUG(sk, msg...) do { } while (0)
72 #endif
73 
74 /* This is the per-socket lock.  The spinlock provides a synchronization
75  * between user contexts and software interrupt processing, whereas the
76  * mini-semaphore synchronizes multiple users amongst themselves.
77  */
78 struct sock_iocb;
79 typedef struct {
80 	spinlock_t		slock;
81 	struct sock_iocb	*owner;
82 	wait_queue_head_t	wq;
83 	/*
84 	 * We express the mutex-alike socket_lock semantics
85 	 * to the lock validator by explicitly managing
86 	 * the slock as a lock variant (in addition to
87 	 * the slock itself):
88 	 */
89 #ifdef CONFIG_DEBUG_LOCK_ALLOC
90 	struct lockdep_map dep_map;
91 #endif
92 } socket_lock_t;
93 
94 struct sock;
95 struct proto;
96 
97 /**
98  *	struct sock_common - minimal network layer representation of sockets
99  *	@skc_family: network address family
100  *	@skc_state: Connection state
101  *	@skc_reuse: %SO_REUSEADDR setting
102  *	@skc_bound_dev_if: bound device index if != 0
103  *	@skc_node: main hash linkage for various protocol lookup tables
104  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
105  *	@skc_refcnt: reference count
106  *	@skc_hash: hash value used with various protocol lookup tables
107  *	@skc_prot: protocol handlers inside a network family
108  *
109  *	This is the minimal network layer representation of sockets, the header
110  *	for struct sock and struct inet_timewait_sock.
111  */
112 struct sock_common {
113 	unsigned short		skc_family;
114 	volatile unsigned char	skc_state;
115 	unsigned char		skc_reuse;
116 	int			skc_bound_dev_if;
117 	struct hlist_node	skc_node;
118 	struct hlist_node	skc_bind_node;
119 	atomic_t		skc_refcnt;
120 	unsigned int		skc_hash;
121 	struct proto		*skc_prot;
122 };
123 
124 /**
125   *	struct sock - network layer representation of sockets
126   *	@__sk_common: shared layout with inet_timewait_sock
127   *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
128   *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
129   *	@sk_lock:	synchronizer
130   *	@sk_rcvbuf: size of receive buffer in bytes
131   *	@sk_sleep: sock wait queue
132   *	@sk_dst_cache: destination cache
133   *	@sk_dst_lock: destination cache lock
134   *	@sk_policy: flow policy
135   *	@sk_rmem_alloc: receive queue bytes committed
136   *	@sk_receive_queue: incoming packets
137   *	@sk_wmem_alloc: transmit queue bytes committed
138   *	@sk_write_queue: Packet sending queue
139   *	@sk_async_wait_queue: DMA copied packets
140   *	@sk_omem_alloc: "o" is "option" or "other"
141   *	@sk_wmem_queued: persistent queue size
142   *	@sk_forward_alloc: space allocated forward
143   *	@sk_allocation: allocation mode
144   *	@sk_sndbuf: size of send buffer in bytes
145   *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
146   *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
147   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
148   *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
149   *	@sk_lingertime: %SO_LINGER l_linger setting
150   *	@sk_backlog: always used with the per-socket spinlock held
151   *	@sk_callback_lock: used with the callbacks in the end of this struct
152   *	@sk_error_queue: rarely used
153   *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
154   *	@sk_err: last error
155   *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
156   *	@sk_ack_backlog: current listen backlog
157   *	@sk_max_ack_backlog: listen backlog set in listen()
158   *	@sk_priority: %SO_PRIORITY setting
159   *	@sk_type: socket type (%SOCK_STREAM, etc)
160   *	@sk_protocol: which protocol this socket belongs in this network family
161   *	@sk_peercred: %SO_PEERCRED setting
162   *	@sk_rcvlowat: %SO_RCVLOWAT setting
163   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
164   *	@sk_sndtimeo: %SO_SNDTIMEO setting
165   *	@sk_filter: socket filtering instructions
166   *	@sk_protinfo: private area, net family specific, when not using slab
167   *	@sk_timer: sock cleanup timer
168   *	@sk_stamp: time stamp of last packet received
169   *	@sk_socket: Identd and reporting IO signals
170   *	@sk_user_data: RPC layer private data
171   *	@sk_sndmsg_page: cached page for sendmsg
172   *	@sk_sndmsg_off: cached offset for sendmsg
173   *	@sk_send_head: front of stuff to transmit
174   *	@sk_security: used by security modules
175   *	@sk_write_pending: a write to stream socket waits to start
176   *	@sk_state_change: callback to indicate change in the state of the sock
177   *	@sk_data_ready: callback to indicate there is data to be processed
178   *	@sk_write_space: callback to indicate there is bf sending space available
179   *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
180   *	@sk_backlog_rcv: callback to process the backlog
181   *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
182  */
183 struct sock {
184 	/*
185 	 * Now struct inet_timewait_sock also uses sock_common, so please just
186 	 * don't add nothing before this first member (__sk_common) --acme
187 	 */
188 	struct sock_common	__sk_common;
189 #define sk_family		__sk_common.skc_family
190 #define sk_state		__sk_common.skc_state
191 #define sk_reuse		__sk_common.skc_reuse
192 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
193 #define sk_node			__sk_common.skc_node
194 #define sk_bind_node		__sk_common.skc_bind_node
195 #define sk_refcnt		__sk_common.skc_refcnt
196 #define sk_hash			__sk_common.skc_hash
197 #define sk_prot			__sk_common.skc_prot
198 	unsigned char		sk_shutdown : 2,
199 				sk_no_check : 2,
200 				sk_userlocks : 4;
201 	unsigned char		sk_protocol;
202 	unsigned short		sk_type;
203 	int			sk_rcvbuf;
204 	socket_lock_t		sk_lock;
205 	/*
206 	 * The backlog queue is special, it is always used with
207 	 * the per-socket spinlock held and requires low latency
208 	 * access. Therefore we special case it's implementation.
209 	 */
210 	struct {
211 		struct sk_buff *head;
212 		struct sk_buff *tail;
213 	} sk_backlog;
214 	wait_queue_head_t	*sk_sleep;
215 	struct dst_entry	*sk_dst_cache;
216 	struct xfrm_policy	*sk_policy[2];
217 	rwlock_t		sk_dst_lock;
218 	atomic_t		sk_rmem_alloc;
219 	atomic_t		sk_wmem_alloc;
220 	atomic_t		sk_omem_alloc;
221 	struct sk_buff_head	sk_receive_queue;
222 	struct sk_buff_head	sk_write_queue;
223 	struct sk_buff_head	sk_async_wait_queue;
224 	int			sk_wmem_queued;
225 	int			sk_forward_alloc;
226 	gfp_t			sk_allocation;
227 	int			sk_sndbuf;
228 	int			sk_route_caps;
229 	int			sk_gso_type;
230 	int			sk_rcvlowat;
231 	unsigned long 		sk_flags;
232 	unsigned long	        sk_lingertime;
233 	struct sk_buff_head	sk_error_queue;
234 	struct proto		*sk_prot_creator;
235 	rwlock_t		sk_callback_lock;
236 	int			sk_err,
237 				sk_err_soft;
238 	unsigned short		sk_ack_backlog;
239 	unsigned short		sk_max_ack_backlog;
240 	__u32			sk_priority;
241 	struct ucred		sk_peercred;
242 	long			sk_rcvtimeo;
243 	long			sk_sndtimeo;
244 	struct sk_filter      	*sk_filter;
245 	void			*sk_protinfo;
246 	struct timer_list	sk_timer;
247 	ktime_t			sk_stamp;
248 	struct socket		*sk_socket;
249 	void			*sk_user_data;
250 	struct page		*sk_sndmsg_page;
251 	struct sk_buff		*sk_send_head;
252 	__u32			sk_sndmsg_off;
253 	int			sk_write_pending;
254 	void			*sk_security;
255 	void			(*sk_state_change)(struct sock *sk);
256 	void			(*sk_data_ready)(struct sock *sk, int bytes);
257 	void			(*sk_write_space)(struct sock *sk);
258 	void			(*sk_error_report)(struct sock *sk);
259   	int			(*sk_backlog_rcv)(struct sock *sk,
260 						  struct sk_buff *skb);
261 	void                    (*sk_destruct)(struct sock *sk);
262 };
263 
264 /*
265  * Hashed lists helper routines
266  */
267 static inline struct sock *__sk_head(const struct hlist_head *head)
268 {
269 	return hlist_entry(head->first, struct sock, sk_node);
270 }
271 
272 static inline struct sock *sk_head(const struct hlist_head *head)
273 {
274 	return hlist_empty(head) ? NULL : __sk_head(head);
275 }
276 
277 static inline struct sock *sk_next(const struct sock *sk)
278 {
279 	return sk->sk_node.next ?
280 		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
281 }
282 
283 static inline int sk_unhashed(const struct sock *sk)
284 {
285 	return hlist_unhashed(&sk->sk_node);
286 }
287 
288 static inline int sk_hashed(const struct sock *sk)
289 {
290 	return !sk_unhashed(sk);
291 }
292 
293 static __inline__ void sk_node_init(struct hlist_node *node)
294 {
295 	node->pprev = NULL;
296 }
297 
298 static __inline__ void __sk_del_node(struct sock *sk)
299 {
300 	__hlist_del(&sk->sk_node);
301 }
302 
303 static __inline__ int __sk_del_node_init(struct sock *sk)
304 {
305 	if (sk_hashed(sk)) {
306 		__sk_del_node(sk);
307 		sk_node_init(&sk->sk_node);
308 		return 1;
309 	}
310 	return 0;
311 }
312 
313 /* Grab socket reference count. This operation is valid only
314    when sk is ALREADY grabbed f.e. it is found in hash table
315    or a list and the lookup is made under lock preventing hash table
316    modifications.
317  */
318 
319 static inline void sock_hold(struct sock *sk)
320 {
321 	atomic_inc(&sk->sk_refcnt);
322 }
323 
324 /* Ungrab socket in the context, which assumes that socket refcnt
325    cannot hit zero, f.e. it is true in context of any socketcall.
326  */
327 static inline void __sock_put(struct sock *sk)
328 {
329 	atomic_dec(&sk->sk_refcnt);
330 }
331 
332 static __inline__ int sk_del_node_init(struct sock *sk)
333 {
334 	int rc = __sk_del_node_init(sk);
335 
336 	if (rc) {
337 		/* paranoid for a while -acme */
338 		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
339 		__sock_put(sk);
340 	}
341 	return rc;
342 }
343 
344 static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
345 {
346 	hlist_add_head(&sk->sk_node, list);
347 }
348 
349 static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
350 {
351 	sock_hold(sk);
352 	__sk_add_node(sk, list);
353 }
354 
355 static __inline__ void __sk_del_bind_node(struct sock *sk)
356 {
357 	__hlist_del(&sk->sk_bind_node);
358 }
359 
360 static __inline__ void sk_add_bind_node(struct sock *sk,
361 					struct hlist_head *list)
362 {
363 	hlist_add_head(&sk->sk_bind_node, list);
364 }
365 
366 #define sk_for_each(__sk, node, list) \
367 	hlist_for_each_entry(__sk, node, list, sk_node)
368 #define sk_for_each_from(__sk, node) \
369 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
370 		hlist_for_each_entry_from(__sk, node, sk_node)
371 #define sk_for_each_continue(__sk, node) \
372 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
373 		hlist_for_each_entry_continue(__sk, node, sk_node)
374 #define sk_for_each_safe(__sk, node, tmp, list) \
375 	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
376 #define sk_for_each_bound(__sk, node, list) \
377 	hlist_for_each_entry(__sk, node, list, sk_bind_node)
378 
379 /* Sock flags */
380 enum sock_flags {
381 	SOCK_DEAD,
382 	SOCK_DONE,
383 	SOCK_URGINLINE,
384 	SOCK_KEEPOPEN,
385 	SOCK_LINGER,
386 	SOCK_DESTROY,
387 	SOCK_BROADCAST,
388 	SOCK_TIMESTAMP,
389 	SOCK_ZAPPED,
390 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
391 	SOCK_DBG, /* %SO_DEBUG setting */
392 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
393 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
394 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
395 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
396 };
397 
398 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
399 {
400 	nsk->sk_flags = osk->sk_flags;
401 }
402 
403 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
404 {
405 	__set_bit(flag, &sk->sk_flags);
406 }
407 
408 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
409 {
410 	__clear_bit(flag, &sk->sk_flags);
411 }
412 
413 static inline int sock_flag(struct sock *sk, enum sock_flags flag)
414 {
415 	return test_bit(flag, &sk->sk_flags);
416 }
417 
418 static inline void sk_acceptq_removed(struct sock *sk)
419 {
420 	sk->sk_ack_backlog--;
421 }
422 
423 static inline void sk_acceptq_added(struct sock *sk)
424 {
425 	sk->sk_ack_backlog++;
426 }
427 
428 static inline int sk_acceptq_is_full(struct sock *sk)
429 {
430 	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
431 }
432 
433 /*
434  * Compute minimal free write space needed to queue new packets.
435  */
436 static inline int sk_stream_min_wspace(struct sock *sk)
437 {
438 	return sk->sk_wmem_queued / 2;
439 }
440 
441 static inline int sk_stream_wspace(struct sock *sk)
442 {
443 	return sk->sk_sndbuf - sk->sk_wmem_queued;
444 }
445 
446 extern void sk_stream_write_space(struct sock *sk);
447 
448 static inline int sk_stream_memory_free(struct sock *sk)
449 {
450 	return sk->sk_wmem_queued < sk->sk_sndbuf;
451 }
452 
453 extern void sk_stream_rfree(struct sk_buff *skb);
454 
455 static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
456 {
457 	skb->sk = sk;
458 	skb->destructor = sk_stream_rfree;
459 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
460 	sk->sk_forward_alloc -= skb->truesize;
461 }
462 
463 static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
464 {
465 	skb_truesize_check(skb);
466 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
467 	sk->sk_wmem_queued   -= skb->truesize;
468 	sk->sk_forward_alloc += skb->truesize;
469 	__kfree_skb(skb);
470 }
471 
472 /* The per-socket spinlock must be held here. */
473 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
474 {
475 	if (!sk->sk_backlog.tail) {
476 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
477 	} else {
478 		sk->sk_backlog.tail->next = skb;
479 		sk->sk_backlog.tail = skb;
480 	}
481 	skb->next = NULL;
482 }
483 
484 #define sk_wait_event(__sk, __timeo, __condition)		\
485 ({	int rc;							\
486 	release_sock(__sk);					\
487 	rc = __condition;					\
488 	if (!rc) {						\
489 		*(__timeo) = schedule_timeout(*(__timeo));	\
490 	}							\
491 	lock_sock(__sk);					\
492 	rc = __condition;					\
493 	rc;							\
494 })
495 
496 extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
497 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
498 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
499 extern int sk_stream_error(struct sock *sk, int flags, int err);
500 extern void sk_stream_kill_queues(struct sock *sk);
501 
502 extern int sk_wait_data(struct sock *sk, long *timeo);
503 
504 struct request_sock_ops;
505 struct timewait_sock_ops;
506 
507 /* Networking protocol blocks we attach to sockets.
508  * socket layer -> transport layer interface
509  * transport -> network interface is defined by struct inet_proto
510  */
511 struct proto {
512 	void			(*close)(struct sock *sk,
513 					long timeout);
514 	int			(*connect)(struct sock *sk,
515 				        struct sockaddr *uaddr,
516 					int addr_len);
517 	int			(*disconnect)(struct sock *sk, int flags);
518 
519 	struct sock *		(*accept) (struct sock *sk, int flags, int *err);
520 
521 	int			(*ioctl)(struct sock *sk, int cmd,
522 					 unsigned long arg);
523 	int			(*init)(struct sock *sk);
524 	int			(*destroy)(struct sock *sk);
525 	void			(*shutdown)(struct sock *sk, int how);
526 	int			(*setsockopt)(struct sock *sk, int level,
527 					int optname, char __user *optval,
528 					int optlen);
529 	int			(*getsockopt)(struct sock *sk, int level,
530 					int optname, char __user *optval,
531 					int __user *option);
532 	int			(*compat_setsockopt)(struct sock *sk,
533 					int level,
534 					int optname, char __user *optval,
535 					int optlen);
536 	int			(*compat_getsockopt)(struct sock *sk,
537 					int level,
538 					int optname, char __user *optval,
539 					int __user *option);
540 	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
541 					   struct msghdr *msg, size_t len);
542 	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
543 					   struct msghdr *msg,
544 					size_t len, int noblock, int flags,
545 					int *addr_len);
546 	int			(*sendpage)(struct sock *sk, struct page *page,
547 					int offset, size_t size, int flags);
548 	int			(*bind)(struct sock *sk,
549 					struct sockaddr *uaddr, int addr_len);
550 
551 	int			(*backlog_rcv) (struct sock *sk,
552 						struct sk_buff *skb);
553 
554 	/* Keeping track of sk's, looking them up, and port selection methods. */
555 	void			(*hash)(struct sock *sk);
556 	void			(*unhash)(struct sock *sk);
557 	int			(*get_port)(struct sock *sk, unsigned short snum);
558 
559 	/* Memory pressure */
560 	void			(*enter_memory_pressure)(void);
561 	atomic_t		*memory_allocated;	/* Current allocated memory. */
562 	atomic_t		*sockets_allocated;	/* Current number of sockets. */
563 	/*
564 	 * Pressure flag: try to collapse.
565 	 * Technical note: it is used by multiple contexts non atomically.
566 	 * All the sk_stream_mem_schedule() is of this nature: accounting
567 	 * is strict, actions are advisory and have some latency.
568 	 */
569 	int			*memory_pressure;
570 	int			*sysctl_mem;
571 	int			*sysctl_wmem;
572 	int			*sysctl_rmem;
573 	int			max_header;
574 
575 	struct kmem_cache		*slab;
576 	unsigned int		obj_size;
577 
578 	atomic_t		*orphan_count;
579 
580 	struct request_sock_ops	*rsk_prot;
581 	struct timewait_sock_ops *twsk_prot;
582 
583 	struct module		*owner;
584 
585 	char			name[32];
586 
587 	struct list_head	node;
588 #ifdef SOCK_REFCNT_DEBUG
589 	atomic_t		socks;
590 #endif
591 	struct {
592 		int inuse;
593 		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
594 	} stats[NR_CPUS];
595 };
596 
597 extern int proto_register(struct proto *prot, int alloc_slab);
598 extern void proto_unregister(struct proto *prot);
599 
600 #ifdef SOCK_REFCNT_DEBUG
601 static inline void sk_refcnt_debug_inc(struct sock *sk)
602 {
603 	atomic_inc(&sk->sk_prot->socks);
604 }
605 
606 static inline void sk_refcnt_debug_dec(struct sock *sk)
607 {
608 	atomic_dec(&sk->sk_prot->socks);
609 	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
610 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
611 }
612 
613 static inline void sk_refcnt_debug_release(const struct sock *sk)
614 {
615 	if (atomic_read(&sk->sk_refcnt) != 1)
616 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
617 		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
618 }
619 #else /* SOCK_REFCNT_DEBUG */
620 #define sk_refcnt_debug_inc(sk) do { } while (0)
621 #define sk_refcnt_debug_dec(sk) do { } while (0)
622 #define sk_refcnt_debug_release(sk) do { } while (0)
623 #endif /* SOCK_REFCNT_DEBUG */
624 
625 /* Called with local bh disabled */
626 static __inline__ void sock_prot_inc_use(struct proto *prot)
627 {
628 	prot->stats[smp_processor_id()].inuse++;
629 }
630 
631 static __inline__ void sock_prot_dec_use(struct proto *prot)
632 {
633 	prot->stats[smp_processor_id()].inuse--;
634 }
635 
636 /* With per-bucket locks this operation is not-atomic, so that
637  * this version is not worse.
638  */
639 static inline void __sk_prot_rehash(struct sock *sk)
640 {
641 	sk->sk_prot->unhash(sk);
642 	sk->sk_prot->hash(sk);
643 }
644 
645 /* About 10 seconds */
646 #define SOCK_DESTROY_TIME (10*HZ)
647 
648 /* Sockets 0-1023 can't be bound to unless you are superuser */
649 #define PROT_SOCK	1024
650 
651 #define SHUTDOWN_MASK	3
652 #define RCV_SHUTDOWN	1
653 #define SEND_SHUTDOWN	2
654 
655 #define SOCK_SNDBUF_LOCK	1
656 #define SOCK_RCVBUF_LOCK	2
657 #define SOCK_BINDADDR_LOCK	4
658 #define SOCK_BINDPORT_LOCK	8
659 
660 /* sock_iocb: used to kick off async processing of socket ios */
661 struct sock_iocb {
662 	struct list_head	list;
663 
664 	int			flags;
665 	int			size;
666 	struct socket		*sock;
667 	struct sock		*sk;
668 	struct scm_cookie	*scm;
669 	struct msghdr		*msg, async_msg;
670 	struct kiocb		*kiocb;
671 };
672 
673 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
674 {
675 	return (struct sock_iocb *)iocb->private;
676 }
677 
678 static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
679 {
680 	return si->kiocb;
681 }
682 
683 struct socket_alloc {
684 	struct socket socket;
685 	struct inode vfs_inode;
686 };
687 
688 static inline struct socket *SOCKET_I(struct inode *inode)
689 {
690 	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
691 }
692 
693 static inline struct inode *SOCK_INODE(struct socket *socket)
694 {
695 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
696 }
697 
698 extern void __sk_stream_mem_reclaim(struct sock *sk);
699 extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
700 
701 #define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
702 
703 static inline int sk_stream_pages(int amt)
704 {
705 	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
706 }
707 
708 static inline void sk_stream_mem_reclaim(struct sock *sk)
709 {
710 	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
711 		__sk_stream_mem_reclaim(sk);
712 }
713 
714 static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
715 {
716 	return (int)skb->truesize <= sk->sk_forward_alloc ||
717 		sk_stream_mem_schedule(sk, skb->truesize, 1);
718 }
719 
720 static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
721 {
722 	return size <= sk->sk_forward_alloc ||
723 	       sk_stream_mem_schedule(sk, size, 0);
724 }
725 
726 /* Used by processes to "lock" a socket state, so that
727  * interrupts and bottom half handlers won't change it
728  * from under us. It essentially blocks any incoming
729  * packets, so that we won't get any new data or any
730  * packets that change the state of the socket.
731  *
732  * While locked, BH processing will add new packets to
733  * the backlog queue.  This queue is processed by the
734  * owner of the socket lock right before it is released.
735  *
736  * Since ~2.3.5 it is also exclusive sleep lock serializing
737  * accesses from user process context.
738  */
739 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
740 
741 /*
742  * Macro so as to not evaluate some arguments when
743  * lockdep is not enabled.
744  *
745  * Mark both the sk_lock and the sk_lock.slock as a
746  * per-address-family lock class.
747  */
748 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
749 do {									\
750 	sk->sk_lock.owner = NULL;					\
751 	init_waitqueue_head(&sk->sk_lock.wq);				\
752 	spin_lock_init(&(sk)->sk_lock.slock);				\
753 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
754 			sizeof((sk)->sk_lock));				\
755 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
756 		       	(skey), (sname));				\
757 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
758 } while (0)
759 
760 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
761 
762 static inline void lock_sock(struct sock *sk)
763 {
764 	lock_sock_nested(sk, 0);
765 }
766 
767 extern void FASTCALL(release_sock(struct sock *sk));
768 
769 /* BH context may only use the following locking interface. */
770 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
771 #define bh_lock_sock_nested(__sk) \
772 				spin_lock_nested(&((__sk)->sk_lock.slock), \
773 				SINGLE_DEPTH_NESTING)
774 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
775 
776 extern struct sock		*sk_alloc(int family,
777 					  gfp_t priority,
778 					  struct proto *prot, int zero_it);
779 extern void			sk_free(struct sock *sk);
780 extern struct sock		*sk_clone(const struct sock *sk,
781 					  const gfp_t priority);
782 
783 extern struct sk_buff		*sock_wmalloc(struct sock *sk,
784 					      unsigned long size, int force,
785 					      gfp_t priority);
786 extern struct sk_buff		*sock_rmalloc(struct sock *sk,
787 					      unsigned long size, int force,
788 					      gfp_t priority);
789 extern void			sock_wfree(struct sk_buff *skb);
790 extern void			sock_rfree(struct sk_buff *skb);
791 
792 extern int			sock_setsockopt(struct socket *sock, int level,
793 						int op, char __user *optval,
794 						int optlen);
795 
796 extern int			sock_getsockopt(struct socket *sock, int level,
797 						int op, char __user *optval,
798 						int __user *optlen);
799 extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
800 						     unsigned long size,
801 						     int noblock,
802 						     int *errcode);
803 extern void *sock_kmalloc(struct sock *sk, int size,
804 			  gfp_t priority);
805 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
806 extern void sk_send_sigurg(struct sock *sk);
807 
808 /*
809  * Functions to fill in entries in struct proto_ops when a protocol
810  * does not implement a particular function.
811  */
812 extern int                      sock_no_bind(struct socket *,
813 					     struct sockaddr *, int);
814 extern int                      sock_no_connect(struct socket *,
815 						struct sockaddr *, int, int);
816 extern int                      sock_no_socketpair(struct socket *,
817 						   struct socket *);
818 extern int                      sock_no_accept(struct socket *,
819 					       struct socket *, int);
820 extern int                      sock_no_getname(struct socket *,
821 						struct sockaddr *, int *, int);
822 extern unsigned int             sock_no_poll(struct file *, struct socket *,
823 					     struct poll_table_struct *);
824 extern int                      sock_no_ioctl(struct socket *, unsigned int,
825 					      unsigned long);
826 extern int			sock_no_listen(struct socket *, int);
827 extern int                      sock_no_shutdown(struct socket *, int);
828 extern int			sock_no_getsockopt(struct socket *, int , int,
829 						   char __user *, int __user *);
830 extern int			sock_no_setsockopt(struct socket *, int, int,
831 						   char __user *, int);
832 extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
833 						struct msghdr *, size_t);
834 extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
835 						struct msghdr *, size_t, int);
836 extern int			sock_no_mmap(struct file *file,
837 					     struct socket *sock,
838 					     struct vm_area_struct *vma);
839 extern ssize_t			sock_no_sendpage(struct socket *sock,
840 						struct page *page,
841 						int offset, size_t size,
842 						int flags);
843 
844 /*
845  * Functions to fill in entries in struct proto_ops when a protocol
846  * uses the inet style.
847  */
848 extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
849 				  char __user *optval, int __user *optlen);
850 extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
851 			       struct msghdr *msg, size_t size, int flags);
852 extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
853 				  char __user *optval, int optlen);
854 extern int compat_sock_common_getsockopt(struct socket *sock, int level,
855 		int optname, char __user *optval, int __user *optlen);
856 extern int compat_sock_common_setsockopt(struct socket *sock, int level,
857 		int optname, char __user *optval, int optlen);
858 
859 extern void sk_common_release(struct sock *sk);
860 
861 /*
862  *	Default socket callbacks and setup code
863  */
864 
865 /* Initialise core socket variables */
866 extern void sock_init_data(struct socket *sock, struct sock *sk);
867 
868 /**
869  *	sk_filter - run a packet through a socket filter
870  *	@sk: sock associated with &sk_buff
871  *	@skb: buffer to filter
872  *	@needlock: set to 1 if the sock is not locked by caller.
873  *
874  * Run the filter code and then cut skb->data to correct size returned by
875  * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
876  * than pkt_len we keep whole skb->data. This is the socket level
877  * wrapper to sk_run_filter. It returns 0 if the packet should
878  * be accepted or -EPERM if the packet should be tossed.
879  *
880  */
881 
882 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
883 {
884 	int err;
885 	struct sk_filter *filter;
886 
887 	err = security_sock_rcv_skb(sk, skb);
888 	if (err)
889 		return err;
890 
891 	rcu_read_lock_bh();
892 	filter = sk->sk_filter;
893 	if (filter) {
894 		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
895 				filter->len);
896 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
897 	}
898  	rcu_read_unlock_bh();
899 
900 	return err;
901 }
902 
903 /**
904  * 	sk_filter_rcu_free: Free a socket filter
905  *	@rcu: rcu_head that contains the sk_filter to free
906  */
907 static inline void sk_filter_rcu_free(struct rcu_head *rcu)
908 {
909 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
910 	kfree(fp);
911 }
912 
913 /**
914  *	sk_filter_release: Release a socket filter
915  *	@sk: socket
916  *	@fp: filter to remove
917  *
918  *	Remove a filter from a socket and release its resources.
919  */
920 
921 static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
922 {
923 	unsigned int size = sk_filter_len(fp);
924 
925 	atomic_sub(size, &sk->sk_omem_alloc);
926 
927 	if (atomic_dec_and_test(&fp->refcnt))
928 		call_rcu_bh(&fp->rcu, sk_filter_rcu_free);
929 }
930 
931 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
932 {
933 	atomic_inc(&fp->refcnt);
934 	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
935 }
936 
937 /*
938  * Socket reference counting postulates.
939  *
940  * * Each user of socket SHOULD hold a reference count.
941  * * Each access point to socket (an hash table bucket, reference from a list,
942  *   running timer, skb in flight MUST hold a reference count.
943  * * When reference count hits 0, it means it will never increase back.
944  * * When reference count hits 0, it means that no references from
945  *   outside exist to this socket and current process on current CPU
946  *   is last user and may/should destroy this socket.
947  * * sk_free is called from any context: process, BH, IRQ. When
948  *   it is called, socket has no references from outside -> sk_free
949  *   may release descendant resources allocated by the socket, but
950  *   to the time when it is called, socket is NOT referenced by any
951  *   hash tables, lists etc.
952  * * Packets, delivered from outside (from network or from another process)
953  *   and enqueued on receive/error queues SHOULD NOT grab reference count,
954  *   when they sit in queue. Otherwise, packets will leak to hole, when
955  *   socket is looked up by one cpu and unhasing is made by another CPU.
956  *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
957  *   (leak to backlog). Packet socket does all the processing inside
958  *   BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets
959  *   use separate SMP lock, so that they are prone too.
960  */
961 
962 /* Ungrab socket and destroy it, if it was the last reference. */
963 static inline void sock_put(struct sock *sk)
964 {
965 	if (atomic_dec_and_test(&sk->sk_refcnt))
966 		sk_free(sk);
967 }
968 
969 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
970 			  const int nested);
971 
972 /* Detach socket from process context.
973  * Announce socket dead, detach it from wait queue and inode.
974  * Note that parent inode held reference count on this struct sock,
975  * we do not release it in this function, because protocol
976  * probably wants some additional cleanups or even continuing
977  * to work with this socket (TCP).
978  */
979 static inline void sock_orphan(struct sock *sk)
980 {
981 	write_lock_bh(&sk->sk_callback_lock);
982 	sock_set_flag(sk, SOCK_DEAD);
983 	sk->sk_socket = NULL;
984 	sk->sk_sleep  = NULL;
985 	write_unlock_bh(&sk->sk_callback_lock);
986 }
987 
988 static inline void sock_graft(struct sock *sk, struct socket *parent)
989 {
990 	write_lock_bh(&sk->sk_callback_lock);
991 	sk->sk_sleep = &parent->wait;
992 	parent->sk = sk;
993 	sk->sk_socket = parent;
994 	security_sock_graft(sk, parent);
995 	write_unlock_bh(&sk->sk_callback_lock);
996 }
997 
998 static inline void sock_copy(struct sock *nsk, const struct sock *osk)
999 {
1000 #ifdef CONFIG_SECURITY_NETWORK
1001 	void *sptr = nsk->sk_security;
1002 #endif
1003 
1004 	memcpy(nsk, osk, osk->sk_prot->obj_size);
1005 #ifdef CONFIG_SECURITY_NETWORK
1006 	nsk->sk_security = sptr;
1007 	security_sk_clone(osk, nsk);
1008 #endif
1009 }
1010 
1011 extern int sock_i_uid(struct sock *sk);
1012 extern unsigned long sock_i_ino(struct sock *sk);
1013 
1014 static inline struct dst_entry *
1015 __sk_dst_get(struct sock *sk)
1016 {
1017 	return sk->sk_dst_cache;
1018 }
1019 
1020 static inline struct dst_entry *
1021 sk_dst_get(struct sock *sk)
1022 {
1023 	struct dst_entry *dst;
1024 
1025 	read_lock(&sk->sk_dst_lock);
1026 	dst = sk->sk_dst_cache;
1027 	if (dst)
1028 		dst_hold(dst);
1029 	read_unlock(&sk->sk_dst_lock);
1030 	return dst;
1031 }
1032 
1033 static inline void
1034 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1035 {
1036 	struct dst_entry *old_dst;
1037 
1038 	old_dst = sk->sk_dst_cache;
1039 	sk->sk_dst_cache = dst;
1040 	dst_release(old_dst);
1041 }
1042 
1043 static inline void
1044 sk_dst_set(struct sock *sk, struct dst_entry *dst)
1045 {
1046 	write_lock(&sk->sk_dst_lock);
1047 	__sk_dst_set(sk, dst);
1048 	write_unlock(&sk->sk_dst_lock);
1049 }
1050 
1051 static inline void
1052 __sk_dst_reset(struct sock *sk)
1053 {
1054 	struct dst_entry *old_dst;
1055 
1056 	old_dst = sk->sk_dst_cache;
1057 	sk->sk_dst_cache = NULL;
1058 	dst_release(old_dst);
1059 }
1060 
1061 static inline void
1062 sk_dst_reset(struct sock *sk)
1063 {
1064 	write_lock(&sk->sk_dst_lock);
1065 	__sk_dst_reset(sk);
1066 	write_unlock(&sk->sk_dst_lock);
1067 }
1068 
1069 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1070 
1071 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1072 
1073 static inline int sk_can_gso(const struct sock *sk)
1074 {
1075 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1076 }
1077 
1078 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1079 
1080 static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
1081 {
1082 	sk->sk_wmem_queued   += skb->truesize;
1083 	sk->sk_forward_alloc -= skb->truesize;
1084 }
1085 
1086 static inline int skb_copy_to_page(struct sock *sk, char __user *from,
1087 				   struct sk_buff *skb, struct page *page,
1088 				   int off, int copy)
1089 {
1090 	if (skb->ip_summed == CHECKSUM_NONE) {
1091 		int err = 0;
1092 		__wsum csum = csum_and_copy_from_user(from,
1093 						     page_address(page) + off,
1094 							    copy, 0, &err);
1095 		if (err)
1096 			return err;
1097 		skb->csum = csum_block_add(skb->csum, csum, skb->len);
1098 	} else if (copy_from_user(page_address(page) + off, from, copy))
1099 		return -EFAULT;
1100 
1101 	skb->len	     += copy;
1102 	skb->data_len	     += copy;
1103 	skb->truesize	     += copy;
1104 	sk->sk_wmem_queued   += copy;
1105 	sk->sk_forward_alloc -= copy;
1106 	return 0;
1107 }
1108 
1109 /*
1110  * 	Queue a received datagram if it will fit. Stream and sequenced
1111  *	protocols can't normally use this as they need to fit buffers in
1112  *	and play with them.
1113  *
1114  * 	Inlined as it's very short and called for pretty much every
1115  *	packet ever received.
1116  */
1117 
1118 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1119 {
1120 	sock_hold(sk);
1121 	skb->sk = sk;
1122 	skb->destructor = sock_wfree;
1123 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1124 }
1125 
1126 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
1127 {
1128 	skb->sk = sk;
1129 	skb->destructor = sock_rfree;
1130 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
1131 }
1132 
1133 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1134 			   unsigned long expires);
1135 
1136 extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
1137 
1138 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1139 
1140 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
1141 {
1142 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
1143 	   number of warnings when compiling with -W --ANK
1144 	 */
1145 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
1146 	    (unsigned)sk->sk_rcvbuf)
1147 		return -ENOMEM;
1148 	skb_set_owner_r(skb, sk);
1149 	skb_queue_tail(&sk->sk_error_queue, skb);
1150 	if (!sock_flag(sk, SOCK_DEAD))
1151 		sk->sk_data_ready(sk, skb->len);
1152 	return 0;
1153 }
1154 
1155 /*
1156  *	Recover an error report and clear atomically
1157  */
1158 
1159 static inline int sock_error(struct sock *sk)
1160 {
1161 	int err;
1162 	if (likely(!sk->sk_err))
1163 		return 0;
1164 	err = xchg(&sk->sk_err, 0);
1165 	return -err;
1166 }
1167 
1168 static inline unsigned long sock_wspace(struct sock *sk)
1169 {
1170 	int amt = 0;
1171 
1172 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1173 		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1174 		if (amt < 0)
1175 			amt = 0;
1176 	}
1177 	return amt;
1178 }
1179 
1180 static inline void sk_wake_async(struct sock *sk, int how, int band)
1181 {
1182 	if (sk->sk_socket && sk->sk_socket->fasync_list)
1183 		sock_wake_async(sk->sk_socket, how, band);
1184 }
1185 
1186 #define SOCK_MIN_SNDBUF 2048
1187 #define SOCK_MIN_RCVBUF 256
1188 
1189 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
1190 {
1191 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
1192 		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
1193 		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
1194 	}
1195 }
1196 
1197 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
1198 						   int size, int mem,
1199 						   gfp_t gfp)
1200 {
1201 	struct sk_buff *skb;
1202 	int hdr_len;
1203 
1204 	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
1205 	skb = alloc_skb_fclone(size + hdr_len, gfp);
1206 	if (skb) {
1207 		skb->truesize += mem;
1208 		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
1209 			skb_reserve(skb, hdr_len);
1210 			return skb;
1211 		}
1212 		__kfree_skb(skb);
1213 	} else {
1214 		sk->sk_prot->enter_memory_pressure();
1215 		sk_stream_moderate_sndbuf(sk);
1216 	}
1217 	return NULL;
1218 }
1219 
1220 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
1221 						  int size,
1222 						  gfp_t gfp)
1223 {
1224 	return sk_stream_alloc_pskb(sk, size, 0, gfp);
1225 }
1226 
1227 static inline struct page *sk_stream_alloc_page(struct sock *sk)
1228 {
1229 	struct page *page = NULL;
1230 
1231 	page = alloc_pages(sk->sk_allocation, 0);
1232 	if (!page) {
1233 		sk->sk_prot->enter_memory_pressure();
1234 		sk_stream_moderate_sndbuf(sk);
1235 	}
1236 	return page;
1237 }
1238 
1239 /*
1240  *	Default write policy as shown to user space via poll/select/SIGIO
1241  */
1242 static inline int sock_writeable(const struct sock *sk)
1243 {
1244 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
1245 }
1246 
1247 static inline gfp_t gfp_any(void)
1248 {
1249 	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
1250 }
1251 
1252 static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
1253 {
1254 	return noblock ? 0 : sk->sk_rcvtimeo;
1255 }
1256 
1257 static inline long sock_sndtimeo(const struct sock *sk, int noblock)
1258 {
1259 	return noblock ? 0 : sk->sk_sndtimeo;
1260 }
1261 
1262 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
1263 {
1264 	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
1265 }
1266 
1267 /* Alas, with timeout socket operations are not restartable.
1268  * Compare this to poll().
1269  */
1270 static inline int sock_intr_errno(long timeo)
1271 {
1272 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
1273 }
1274 
1275 extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
1276 	struct sk_buff *skb);
1277 
1278 static __inline__ void
1279 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1280 {
1281 	ktime_t kt = skb->tstamp;
1282 
1283 	if (sock_flag(sk, SOCK_RCVTSTAMP))
1284 		__sock_recv_timestamp(msg, sk, skb);
1285 	else
1286 		sk->sk_stamp = kt;
1287 }
1288 
1289 /**
1290  * sk_eat_skb - Release a skb if it is no longer needed
1291  * @sk: socket to eat this skb from
1292  * @skb: socket buffer to eat
1293  * @copied_early: flag indicating whether DMA operations copied this data early
1294  *
1295  * This routine must be called with interrupts disabled or with the socket
1296  * locked so that the sk_buff queue operation is ok.
1297 */
1298 #ifdef CONFIG_NET_DMA
1299 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
1300 {
1301 	__skb_unlink(skb, &sk->sk_receive_queue);
1302 	if (!copied_early)
1303 		__kfree_skb(skb);
1304 	else
1305 		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
1306 }
1307 #else
1308 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
1309 {
1310 	__skb_unlink(skb, &sk->sk_receive_queue);
1311 	__kfree_skb(skb);
1312 }
1313 #endif
1314 
1315 extern void sock_enable_timestamp(struct sock *sk);
1316 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
1317 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
1318 
1319 /*
1320  *	Enable debug/info messages
1321  */
1322 extern int net_msg_warn;
1323 #define NETDEBUG(fmt, args...) \
1324 	do { if (net_msg_warn) printk(fmt,##args); } while (0)
1325 
1326 #define LIMIT_NETDEBUG(fmt, args...) \
1327 	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
1328 
1329 /*
1330  * Macros for sleeping on a socket. Use them like this:
1331  *
1332  * SOCK_SLEEP_PRE(sk)
1333  * if (condition)
1334  * 	schedule();
1335  * SOCK_SLEEP_POST(sk)
1336  *
1337  * N.B. These are now obsolete and were, afaik, only ever used in DECnet
1338  * and when the last use of them in DECnet has gone, I'm intending to
1339  * remove them.
1340  */
1341 
1342 #define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
1343 				DECLARE_WAITQUEUE(wait, tsk); \
1344 				tsk->state = TASK_INTERRUPTIBLE; \
1345 				add_wait_queue((sk)->sk_sleep, &wait); \
1346 				release_sock(sk);
1347 
1348 #define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
1349 				remove_wait_queue((sk)->sk_sleep, &wait); \
1350 				lock_sock(sk); \
1351 				}
1352 
1353 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
1354 {
1355 	if (valbool)
1356 		sock_set_flag(sk, bit);
1357 	else
1358 		sock_reset_flag(sk, bit);
1359 }
1360 
1361 extern __u32 sysctl_wmem_max;
1362 extern __u32 sysctl_rmem_max;
1363 
1364 extern void sk_init(void);
1365 
1366 #ifdef CONFIG_SYSCTL
1367 extern struct ctl_table core_table[];
1368 #endif
1369 
1370 extern int sysctl_optmem_max;
1371 
1372 extern __u32 sysctl_wmem_default;
1373 extern __u32 sysctl_rmem_default;
1374 
1375 #endif	/* _SOCK_H */
1376