/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

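/*
 * Illustrative usage of SOCK_DEBUG() (a sketch, not part of the original
 * header): the message is emitted only when the socket exists and has the
 * SOCK_DBG flag set, e.g. via setsockopt(SO_DEBUG).
 *
 *	SOCK_DEBUG(sk, "%s: rcv_nxt out of window\n", __FUNCTION__);
 */
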
/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;
struct proto;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

119 
120 /**
121   *	struct sock - network layer representation of sockets
122   *	@__sk_common: shared layout with inet_timewait_sock
123   *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
124   *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
125   *	@sk_lock:	synchronizer
126   *	@sk_rcvbuf: size of receive buffer in bytes
127   *	@sk_sleep: sock wait queue
128   *	@sk_dst_cache: destination cache
129   *	@sk_dst_lock: destination cache lock
130   *	@sk_policy: flow policy
131   *	@sk_rmem_alloc: receive queue bytes committed
132   *	@sk_receive_queue: incoming packets
133   *	@sk_wmem_alloc: transmit queue bytes committed
134   *	@sk_write_queue: Packet sending queue
135   *	@sk_async_wait_queue: DMA copied packets
136   *	@sk_omem_alloc: "o" is "option" or "other"
137   *	@sk_wmem_queued: persistent queue size
138   *	@sk_forward_alloc: space allocated forward
139   *	@sk_allocation: allocation mode
140   *	@sk_sndbuf: size of send buffer in bytes
141   *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
142   *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
143   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
144   *	@sk_lingertime: %SO_LINGER l_linger setting
145   *	@sk_backlog: always used with the per-socket spinlock held
146   *	@sk_callback_lock: used with the callbacks in the end of this struct
147   *	@sk_error_queue: rarely used
148   *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
149   *	@sk_err: last error
150   *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
151   *	@sk_ack_backlog: current listen backlog
152   *	@sk_max_ack_backlog: listen backlog set in listen()
153   *	@sk_priority: %SO_PRIORITY setting
154   *	@sk_type: socket type (%SOCK_STREAM, etc)
155   *	@sk_protocol: which protocol this socket belongs in this network family
156   *	@sk_peercred: %SO_PEERCRED setting
157   *	@sk_rcvlowat: %SO_RCVLOWAT setting
158   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
159   *	@sk_sndtimeo: %SO_SNDTIMEO setting
160   *	@sk_filter: socket filtering instructions
161   *	@sk_protinfo: private area, net family specific, when not using slab
162   *	@sk_timer: sock cleanup timer
163   *	@sk_stamp: time stamp of last packet received
164   *	@sk_socket: Identd and reporting IO signals
165   *	@sk_user_data: RPC layer private data
166   *	@sk_sndmsg_page: cached page for sendmsg
167   *	@sk_sndmsg_off: cached offset for sendmsg
168   *	@sk_send_head: front of stuff to transmit
169   *	@sk_security: used by security modules
170   *	@sk_write_pending: a write to stream socket waits to start
171   *	@sk_state_change: callback to indicate change in the state of the sock
172   *	@sk_data_ready: callback to indicate there is data to be processed
173   *	@sk_write_space: callback to indicate there is bf sending space available
174   *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
175   *	@sk_backlog_rcv: callback to process the backlog
176   *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
177  */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

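/*
 * Illustrative lookup loop (a sketch, not part of the original header):
 * walk one hash chain under the table lock and take a reference on a
 * matching socket. "my_table_lock", "my_hash_table" and "my_match" are
 * hypothetical stand-ins for a protocol's own table and match criteria.
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	read_lock(&my_table_lock);
 *	sk_for_each(sk, node, &my_hash_table[slot]) {
 *		if (my_match(sk)) {
 *			sock_hold(sk);
 *			break;
 *		}
 *	}
 *	read_unlock(&my_table_lock);
 */
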
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

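/*
 * Illustrative flag usage (a sketch, not part of the original header):
 * sock_set_flag()/sock_reset_flag() use the non-atomic bitops, so callers
 * are expected to serialize, e.g. under lock_sock() as sock_setsockopt()
 * does via sock_valbool_flag().
 *
 *	lock_sock(sk);
 *	if (!sock_flag(sk, SOCK_DEAD))
 *		sock_set_flag(sk, SOCK_LINGER);
 *	release_sock(sk);
 */
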
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

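/*
 * Illustrative receive pattern (a sketch, not part of the original header):
 * a protocol's BH receive path either processes the skb directly or, if a
 * process currently owns the socket, defers it to the backlog, which the
 * lock owner drains in release_sock(). This is in the spirit of
 * sk_receive_skb(), declared later in this file.
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk->sk_backlog_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */
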
#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
	}							\
	lock_sock(__sk);					\
	rc = __condition;					\
	rc;							\
})

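/*
 * Illustrative wait loop (a sketch, not part of the original header),
 * modelled on sk_wait_data(): put the task on the socket's wait queue,
 * then let sk_wait_event() drop the lock, sleep and re-test the condition.
 *
 *	DEFINE_WAIT(wait);
 *	int rc;
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 */
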
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

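/*
 * Illustrative registration (a sketch, not part of the original header):
 * a protocol fills in a struct proto and registers it at init time.
 * "myproto" and its handlers are hypothetical. With alloc_slab != 0,
 * proto_register() creates a slab cache of obj_size bytes for the socks.
 *
 *	static struct proto myproto_prot = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.close		= myproto_close,
 *		.sendmsg	= myproto_sendmsg,
 *		.recvmsg	= myproto_recvmsg,
 *		.backlog_rcv	= myproto_backlog_rcv,
 *		.hash		= myproto_hash,
 *		.unhash		= myproto_unhash,
 *		.obj_size	= sizeof(struct myproto_sock),
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return proto_register(&myproto_prot, 1);
 *	}
 */
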
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

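/*
 * Illustrative process-context locking (a sketch, not part of the
 * original header): user-context code takes the lock with lock_sock(),
 * which also marks the socket as owned so BH handlers divert packets
 * to the backlog; release_sock() drains that backlog.
 *
 *	lock_sock(sk);
 *	... modify sk state, e.g. buffer sizes or flags ...
 *	release_sock(sk);
 */
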
extern struct sock		*sk_alloc(int family,
					  gfp_t priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			unsigned int pkt_len = sk_run_filter(skb, filter->insns,
							     filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

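/*
 * Illustrative call site (a sketch, not part of the original header):
 * a protocol's receive path runs the socket filter before queueing and
 * drops the packet on a non-zero return. Passing needlock=1 makes
 * sk_filter() take the BH lock itself.
 *
 *	if (sk_filter(sk, skb, 1)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */
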
/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, a reference from a list,
 *   a running timer, an skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in a queue. Otherwise, packets would leak into a hole
 *   when the socket is looked up by one CPU and unhashing is done by another.
 *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are covered too.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

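/*
 * Illustrative refcounting (a sketch, not part of the original header):
 * per the postulates above, any code that keeps a struct sock pointer
 * beyond the locked lookup must hold a reference across that use.
 *
 *	sock_hold(sk);		keep sk alive while we use it
 *	...
 *	sock_put(sk);		drop the reference; may free sk
 */
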
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

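/*
 * Illustrative route revalidation (a sketch, not part of the original
 * header): an output path checks the cached dst and, if it has been
 * invalidated, re-routes before transmitting. "my_reroute" is a
 * hypothetical stand-in for the protocol's route lookup.
 *
 *	struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *	if (dst == NULL) {
 *		if (my_reroute(sk) < 0)
 *			return -EHOSTUNREACH;
 *		dst = __sk_dst_get(sk);
 *	}
 */
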
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						     page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 * 	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 * 	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/* From STCP for fast SACK processing */
#define sk_stream_for_retrans_queue_from(skb, sk)			\
		for (; (skb != (sk)->sk_send_head) &&                   \
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

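/*
 * Illustrative retransmit-queue walk (a sketch, not part of the original
 * header): iterate the sent-but-unacked part of the write queue, i.e.
 * stop at sk_send_head (the first unsent skb) or at the queue head
 * sentinel. "my_covered_by_ack" and "my_mark_acked" are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	sk_stream_for_retrans_queue(skb, sk) {
 *		if (my_covered_by_ack(skb))
 *			my_mark_acked(skb);
 *	}
 */
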
/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with a timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

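/*
 * Illustrative blocking-receive skeleton (a sketch, not part of the
 * original header), in the style of datagram recvmsg implementations:
 * derive the timeout from the socket, wait for data, and map an
 * interrupting signal to the right errno.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */
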
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}

/**
 * sk_eat_skb - Release an skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...)	printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...)	do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 * 	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */