/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					that a socket has been reset and must
 *					stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

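/*
 * A usage sketch (hypothetical call site): SOCK_DEBUG() expands to a
 * printk only when the socket has %SO_DEBUG set, so it is cheap enough
 * to leave in hot paths.
 *
 *	SOCK_DEBUG(sk, "rcvbuf overrun: rmem=%d rcvbuf=%d\n",
 *		   atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
 */
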
/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;
struct proto;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock:	synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks at the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, %IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs to in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_write_pending: a write to the stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when its refcnt reaches 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

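/*
 * A lookup sketch (hypothetical my_lookup()/my_hash_lock standing in for
 * a protocol's own table and lock): walk one hash chain under the lock
 * that protects it and take a reference before the lock is dropped, as
 * the comment above sock_hold() requires.
 *
 *	static struct sock *my_lookup(struct hlist_head *chain,
 *				      unsigned int hash)
 *	{
 *		struct sock *sk;
 *		struct hlist_node *node;
 *
 *		read_lock(&my_hash_lock);
 *		sk_for_each(sk, node, chain)
 *			if (sk->sk_hash == hash)
 *				goto found;
 *		sk = NULL;
 *	found:
 *		if (sk)
 *			sock_hold(sk);
 *		read_unlock(&my_hash_lock);
 *		return sk;
 *	}
 */
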
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

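/*
 * A sketch of how a protocol's connection-request path might use these
 * helpers (hypothetical fragment, modeled on what TCP's request handling
 * does): refuse new connection requests once the backlog configured via
 * listen() is full, bump the count when a child socket is queued for
 * accept(), and drop it when accept() dequeues the child.
 *
 *	if (sk_acceptq_is_full(sk))
 *		goto drop;
 *	...
 *	sk_acceptq_added(sk);
 *	...
 *	sk_acceptq_removed(sk);
 */
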
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

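/*
 * A sendmsg-path sketch (hypothetical fragment): stream protocols check
 * sk_stream_memory_free() before queueing new data and wait otherwise;
 * sk_stream_write_space() wakes writers once sk_stream_wspace() climbs
 * back above sk_stream_min_wspace().
 *
 *	if (!sk_stream_memory_free(sk))
 *		goto wait_for_sndbuf;
 *	...copy user data into the write queue...
 */
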
extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})

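/*
 * A wait-loop sketch built on sk_wait_event() (hypothetical fragment;
 * the real waiters are the sk_stream_wait_*() helpers declared below).
 * Note that sk_wait_event() releases the socket lock around
 * schedule_timeout() and retakes it afterwards:
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (!sk_stream_memory_free(sk)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *		sk_wait_event(sk, &timeo, sk_stream_memory_free(sk));
 *		finish_wait(sk->sk_sleep, &wait);
 *	}
 */
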
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non-atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

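/*
 * A registration sketch for a new protocol (hypothetical "myproto";
 * only a subset of the ops is shown). With alloc_slab != 0,
 * proto_register() creates a kmem cache of obj_size bytes from which
 * sk_alloc() can carve this protocol's socks.
 *
 *	static struct proto myproto_prot = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct myproto_sock),
 *		.close		= myproto_close,
 *		.hash		= myproto_hash,
 *		.unhash		= myproto_unhash,
 *		.backlog_rcv	= myproto_backlog_rcv,
 *	};
 *
 *	err = proto_register(&myproto_prot, 1);
 *	...
 *	proto_unregister(&myproto_prot);
 */
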
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

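/*
 * A sketch of the two calling contexts (compare sk_receive_skb()
 * further down). Process context takes the full socket lock; softirq
 * context takes only the spinlock and defers to the backlog while a
 * user owns the lock, and release_sock() then runs the queued skbs
 * through sk_backlog_rcv():
 *
 * process context:
 *	lock_sock(sk);
 *	...modify socket state...
 *	release_sock(sk);
 *
 * softirq context:
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk->sk_backlog_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */
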
extern struct sock		*sk_alloc(int family,
					  gfp_t priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by the caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			unsigned int pkt_len = sk_run_filter(skb, filter->insns,
							     filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

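/*
 * A sketch of swapping in a new filter, roughly the way the
 * %SO_ATTACH_FILTER handling does it (simplified; error handling
 * omitted, fp/old_fp are hypothetical locals): charge the new filter,
 * publish it under the socket spinlock, then drop the old one.
 *
 *	sk_filter_charge(sk, fp);
 *	spin_lock_bh(&sk->sk_lock.slock);
 *	old_fp = sk->sk_filter;
 *	sk->sk_filter = fp;
 *	spin_unlock_bh(&sk->sk_lock.slock);
 *	if (old_fp)
 *		sk_filter_release(sk, old_fp);
 */
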
/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but by
 *   the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in a queue. Otherwise, packets will leak when the
 *   socket is looked up by one cpu and unhashed by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet sockets do all the processing inside
 *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are not prone to it either.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

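/*
 * The resulting lifetime pattern (sketch; my_lookup() is the
 * hypothetical helper from the hash-chain example above): the lookup
 * takes a reference under the table lock, the caller works on the
 * socket, then drops its reference, which may be the last one.
 *
 *	sk = my_lookup(chain, hash);
 *	if (sk) {
 *		...use sk...
 *		sock_put(sk);
 *	}
 */
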
static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from the wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock,
 * which we do not release in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}

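/*
 * A transmit-path sketch (hypothetical fragment, close to what the TCP
 * output path does): revalidate the cached route before use and rebuild
 * it if it has gone obsolete; sk_setup_caps() below then recomputes
 * sk_route_caps from the new device.
 *
 *	dst = __sk_dst_check(sk, 0);
 *	if (dst == NULL) {
 *		...re-route, e.g. via ip_route_output_flow()...
 *		sk_setup_caps(sk, &rt->u.dst);
 *	}
 */
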
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with
	   the socket lock held! We assume that users of this
	   function are lock free.
	*/
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}

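/*
 * A datagram receive sketch (hypothetical handler, close to what UDP
 * does): called without the socket lock held, it filters, charges and
 * queues the skb, waking any reader through sk->sk_data_ready().
 *
 *	static int mydgram_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return -1;
 *		}
 *		return 0;
 *	}
 */
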
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/* From STCP, for fast SACK processing */
#define sk_stream_for_retrans_queue_from(skb, sk)			\
		for (; (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeouts socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

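/*
 * A blocking-read sketch tying the timeout helpers together
 * (hypothetical recvmsg fragment): a zero timeout means non-blocking,
 * and an interrupted wait maps to -ERESTARTSYS or -EINTR depending on
 * whether a finite timeout was armed.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */
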
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* A race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(fmt, args...)	do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while (0)
#else
#define NETDEBUG(fmt, args...)	printk(fmt, ##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt, ##args); } while (0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 *	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, AFAIK, only ever used in DECnet;
 * once the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */