/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

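/*
 * Example (illustrative sketch, not part of this header's API): SOCK_DEBUG()
 * only prints when the socket has the SOCK_DBG flag set, typically via the
 * SO_DEBUG socket option, so protocol code can leave messages like this in
 * place without flooding the log.  The format arguments shown (rcv_nxt,
 * copied) are hypothetical:
 *
 *	SOCK_DEBUG(sk, "%s: rcv_nxt=%u, copied=%d\n",
 *		   __FUNCTION__, rcv_nxt, copied);
 */
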
/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;

/**
  *	struct sock_common - minimal network layer representation of sockets
  *	@skc_family: network address family
  *	@skc_state: connection state
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
  *	@skc_node: main hash linkage for various protocol lookup tables
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
  *	@skc_refcnt: reference count
  *
  *	This is the minimal network layer representation of sockets, the header
  *	for struct sock and struct tcp_tw_bucket.
  */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
};

/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with tcp_tw_bucket
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock: synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_sleep: sock wait queue
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_rmem_alloc: receive queue bytes committed
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: packet sending queue
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_hashent: hash entry in several tables (e.g. tcp_ehash)
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks at the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot: protocol handlers inside a network family
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure, not just 'timed out'
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs to within this network family
  *	@sk_peercred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_sndmsg_page: cached page for sendmsg
  *	@sk_sndmsg_off: cached offset for sendmsg
  *	@sk_send_head: front of stuff to transmit
  *	@sk_write_pending: a write to a stream socket waits to start
  *	@sk_state_change: callback to indicate a change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer space available for sending
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct tcp_tw_bucket also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	unsigned int		sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_hashent;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk has ALREADY been grabbed, e.g. it has been found in a hash
   table or a list and the lookup was made under a lock that prevents
   hash table modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

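/*
 * Example (illustrative sketch): a protocol lookup typically walks one hash
 * chain with sk_for_each() while holding the lock that protects the table,
 * and takes a reference before that lock is dropped.  The table, lock, and
 * match condition below are hypothetical:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	read_lock(&my_hash_lock);
 *	sk_for_each(sk, node, &my_hash[hashval]) {
 *		if (sk->sk_bound_dev_if == dif) {	// match condition
 *			sock_hold(sk);			// see sock_hold() above
 *			break;
 *		}
 *	}
 *	read_unlock(&my_hash_lock);
 */
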
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

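/*
 * Example (illustrative sketch): protocol code tests and sets these flags
 * through the accessors above rather than touching sk->sk_flags directly,
 * e.g. when completing a connection teardown:
 *
 *	if (!sock_flag(sk, SOCK_DEAD)) {
 *		sock_set_flag(sk, SOCK_DONE);
 *		sk->sk_state_change(sk);	// wake up anyone waiting
 *	}
 */
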
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)				\
do {	if (!(__sk)->sk_backlog.tail) {				\
		(__sk)->sk_backlog.head =			\
		     (__sk)->sk_backlog.tail = (__skb);		\
	} else {						\
		((__sk)->sk_backlog.tail)->next = (__skb);	\
		(__sk)->sk_backlog.tail = (__skb);		\
	}							\
	(__skb)->next = NULL;					\
} while(0)

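/*
 * Example (illustrative sketch): the canonical receive pattern in BH context
 * takes the spinlock half of the socket lock, delivers directly when no user
 * context owns the socket, and otherwise defers the skb to the backlog for
 * the lock owner to process in release_sock():
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk->sk_backlog_rcv(sk, skb);	// process now
 *	else
 *		sk_add_backlog(sk, skb);		// defer to lock owner
 *	bh_unlock_sock(sk);
 */
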
#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})

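/*
 * Example (illustrative sketch): callers typically wrap sk_wait_event() in a
 * prepare_to_wait()/finish_wait() loop, rechecking the condition until it
 * holds or the timeout expires; compare sk_stream_wait_memory() and the
 * sk_wait_data() declared below.  The condition helper is hypothetical:
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (!done && timeo) {
 *		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *		done = sk_wait_event(sk, &timeo, some_condition(sk));
 *	}
 *	finish_wait(sk->sk_sleep, &wait);
 */
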
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	struct module		*owner;

	char			name[32];

	struct list_head	node;

	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

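/*
 * Example (illustrative sketch): a protocol fills in a struct proto and
 * registers it at init time; passing alloc_slab == 1 asks proto_register()
 * to create a slab cache of obj_size-sized socks for it.  The myproto_*
 * names and struct myproto_sock are hypothetical:
 *
 *	static struct proto myproto_prot = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.close		= myproto_close,
 *		.connect	= myproto_connect,
 *		.backlog_rcv	= myproto_backlog_rcv,
 *		.obj_size	= sizeof(struct myproto_sock),
 *	};
 *
 *	static int __init myproto_init(void)
 *	{
 *		return proto_register(&myproto_prot, 1);
 *	}
 */
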
/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

589 struct sock_iocb {
590 	struct list_head	list;
591 
592 	int			flags;
593 	int			size;
594 	struct socket		*sock;
595 	struct sock		*sk;
596 	struct scm_cookie	*scm;
597 	struct msghdr		*msg, async_msg;
598 	struct iovec		async_iov;
599 	struct kiocb		*kiocb;
600 };
601 
602 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
603 {
604 	return (struct sock_iocb *)iocb->private;
605 }
606 
607 static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
608 {
609 	return si->kiocb;
610 }
611 
612 struct socket_alloc {
613 	struct socket socket;
614 	struct inode vfs_inode;
615 };
616 
617 static inline struct socket *SOCKET_I(struct inode *inode)
618 {
619 	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
620 }
621 
622 static inline struct inode *SOCK_INODE(struct socket *socket)
623 {
624 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
625 }
626 
627 extern void __sk_stream_mem_reclaim(struct sock *sk);
628 extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
629 
630 #define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
631 
632 static inline int sk_stream_pages(int amt)
633 {
634 	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
635 }
636 
637 static inline void sk_stream_mem_reclaim(struct sock *sk)
638 {
639 	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
640 		__sk_stream_mem_reclaim(sk);
641 }
642 
643 static inline void sk_stream_writequeue_purge(struct sock *sk)
644 {
645 	struct sk_buff *skb;
646 
647 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
648 		sk_stream_free_skb(sk, skb);
649 	sk_stream_mem_reclaim(sk);
650 }
651 
652 static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
653 {
654 	return (int)skb->truesize <= sk->sk_forward_alloc ||
655 		sk_stream_mem_schedule(sk, skb->truesize, 1);
656 }
657 
658 /* Used by processes to "lock" a socket state, so that
659  * interrupts and bottom half handlers won't change it
660  * from under us. It essentially blocks any incoming
661  * packets, so that we won't get any new data or any
662  * packets that change the state of the socket.
663  *
664  * While locked, BH processing will add new packets to
665  * the backlog queue.  This queue is processed by the
666  * owner of the socket lock right before it is released.
667  *
668  * Since ~2.3.5 it is also exclusive sleep lock serializing
669  * accesses from user process context.
670  */
671 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
672 
673 extern void FASTCALL(lock_sock(struct sock *sk));
674 extern void FASTCALL(release_sock(struct sock *sk));
675 
676 /* BH context may only use the following locking interface. */
677 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
678 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
679 
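/*
 * Example (illustrative sketch): process context brackets socket access with
 * lock_sock()/release_sock(), which also drains the backlog on release; BH
 * context may only use the spinlock pair:
 *
 *	// process context (may sleep)
 *	lock_sock(sk);
 *	... modify socket state ...
 *	release_sock(sk);		// processes sk->sk_backlog
 *
 *	// softirq context (must not sleep)
 *	bh_lock_sock(sk);
 *	... deliver, or sk_add_backlog(sk, skb) if owned by user ...
 *	bh_unlock_sock(sk);
 */
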
extern struct sock		*sk_alloc(int family, int priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by the caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

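/*
 * Example (illustrative sketch): installing a new filter charges the
 * socket's option memory and drops the old filter's charge; this is roughly
 * the pattern used when %SO_ATTACH_FILTER installs a new program (the real
 * code does the swap under the socket lock):
 *
 *	sk_filter_charge(sk, newfp);		// take ref + charge omem
 *	oldfp = sk->sk_filter;
 *	sk->sk_filter = newfp;
 *	if (oldfp)
 *		sk_filter_release(sk, oldfp);	// uncharge + maybe kfree
 */
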
/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase back.
 * * When the reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current
 *   CPU is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in a queue. Otherwise, packets would leak when a socket
 *   is looked up by one CPU while unhashing is done by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues),
 *   tcp (leak to backlog). Packet sockets do all the processing inside
 *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are not prone to it either.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

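/*
 * Example (illustrative sketch): the postulates above give the usual
 * lifetime pattern for code that looks a socket up from a shared table
 * (the lookup helper is hypothetical and returns with sock_hold() done):
 *
 *	sk = myproto_lookup(saddr, daddr);
 *	if (sk) {
 *		... use sk ...
 *		sock_put(sk);		// may free sk if this was the last ref
 *	}
 */
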
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock,
 * and we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}

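/*
 * Example (illustrative sketch, reference handling simplified): a transmit
 * path revalidates the cached route, falls back to a fresh route lookup
 * when the cache is stale, and drops its own reference when done.  The
 * myproto_route_output() helper is hypothetical:
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		dst = myproto_route_output(sk);	// fresh lookup
 *		if (!dst)
 *			return -EHOSTUNREACH;
 *		sk_dst_set(sk, dst);		// cache takes over this ref
 *		dst_hold(dst);			// keep one for ourselves
 *	}
 *	... transmit via dst ...
 *	dst_release(dst);
 */
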
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						     page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with the
	   socket lock held! We assume that users of this function are
	   lock free.
	*/
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}

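/*
 * Example (illustrative sketch): a datagram protocol's receive routine can
 * hand each matching skb straight to sock_queue_rcv_skb() and free it on
 * failure.  The myproto_lookup_skb() helper is hypothetical and is assumed
 * to return a held socket:
 *
 *	static int myproto_rcv(struct sk_buff *skb)
 *	{
 *		struct sock *sk = myproto_lookup_skb(skb);
 *
 *		if (!sk || sock_queue_rcv_skb(sk, skb) < 0)
 *			kfree_skb(skb);		// not delivered
 *		if (sk)
 *			sock_put(sk);
 *		return 0;
 *	}
 */
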
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem, int gfp)
{
	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->sk_forward_alloc >= (int)skb->truesize ||
		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size, int gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
		page = alloc_pages(sk->sk_allocation, 0);
	else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeouts socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

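/*
 * Example (illustrative sketch): a blocking recvmsg implementation typically
 * converts the noblock flag into a timeout, waits for data, and maps a
 * pending signal through sock_intr_errno():
 *
 *	long timeo = sock_rcvtimeo(sk, noblock);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);	// declared above
 *	}
 */
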
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval *stamp = &skb->stamp;
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp->tv_sec == 0)
			do_gettimeofday(stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 stamp);
	} else
		sk->sk_stamp = *stamp;
}

/**
 * sk_eat_skb - release an skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#define LIMIT_NETDEBUG(x) do {} while(0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 * 	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, AFAIK, only ever used in DECnet,
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

#endif	/* _SOCK_H */