xref: /openbmc/linux/net/unix/af_unix.c (revision bf070bb0)
1 /*
2  * NET4:	Implementation of BSD Unix domain sockets.
3  *
4  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
5  *
6  *		This program is free software; you can redistribute it and/or
7  *		modify it under the terms of the GNU General Public License
8  *		as published by the Free Software Foundation; either version
9  *		2 of the License, or (at your option) any later version.
10  *
11  * Fixes:
12  *		Linus Torvalds	:	Assorted bug cures.
13  *		Niibe Yutaka	:	async I/O support.
14  *		Carsten Paeth	:	PF_UNIX check, address fixes.
15  *		Alan Cox	:	Limit size of allocated blocks.
16  *		Alan Cox	:	Fixed the stupid socketpair bug.
17  *		Alan Cox	:	BSD compatibility fine tuning.
18  *		Alan Cox	:	Fixed a bug in connect when interrupted.
19  *		Alan Cox	:	Sorted out a proper draft version of
20  *					file descriptor passing hacked up from
21  *					Mike Shaver's work.
22  *		Marty Leisner	:	Fixes to fd passing
23  *		Nick Nevin	:	recvmsg bugfix.
24  *		Alan Cox	:	Started proper garbage collector
25  *		Heiko Eißfeldt	:	Missing verify_area check
26  *		Alan Cox	:	Started POSIXisms
27  *		Andreas Schwab	:	Replace inode by dentry for proper
28  *					reference counting
29  *		Kirk Petersen	:	Made this a module
30  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
31  *					Lots of bug fixes.
32  *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
33  *					by the above two patches.
34  *	     Andrea Arcangeli	:	If possible we block in connect(2)
35  *					if the max backlog of the listen socket
36  *					has been reached. This won't break
37  *					old apps and it will avoid a huge amount
38  *					of hashed socks (this is for unix_gc()
39  *					performance reasons).
40  *					Security fix that limits the max
41  *					number of socks to 2*max_files and
42  *					the number of skbs queueable in the
43  *					dgram receiver.
44  *		Artur Skawina   :	Hash function optimizations
45  *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
46  *	      Malcolm Beattie   :	Set peercred for socketpair
47  *	     Michal Ostrowski   :       Module initialization cleanup.
48  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
49  *	     				the core infrastructure is doing that
50  *	     				for all net proto families now (2.5.69+)
51  *
52  *
53  * Known differences from reference BSD that was tested:
54  *
55  *	[TO FIX]
56  *	ECONNREFUSED is not returned from one end of a connected() socket to the
57  *		other the moment one end closes.
58  *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
59  *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
60  *	[NOT TO FIX]
61  *	accept() returns a path name even if the connecting socket has closed
62  *		in the meantime (BSD loses the path and gives up).
63  *	accept() returns 0 length path for an unbound connector. BSD returns 16
64  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
66  *	BSD af_unix apparently has connect forgetting to block properly.
67  *		(need to check this with the POSIX spec in detail)
68  *
69  * Differences from 2.0.0-11-... (ANK)
70  *	Bug fixes and improvements.
71  *		- client shutdown killed server socket.
72  *		- removed all useless cli/sti pairs.
73  *
74  *	Semantic changes/extensions.
75  *		- generic control message passing.
76  *		- SCM_CREDENTIALS control message.
77  *		- "Abstract" (not FS based) socket bindings.
78  *		  Abstract names are sequences of bytes (not zero terminated)
79  *		  starting with a 0 byte, so that this name space does not intersect
80  *		  with BSD names.
81  */
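
/* A minimal userspace sketch of the abstract namespace described above:
 * the name is the byte sequence after a leading 0, delimited by the
 * address length rather than by NUL termination. The name "\0demo" is
 * hypothetical and error handling is elided.
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_abstract(void)
{
	struct sockaddr_un sun;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	/* sun_path[0] stays 0: abstract, never visible in the filesystem */
	memcpy(sun.sun_path + 1, "demo", 4);
	/* the passed length must count exactly the bytes forming the name */
	return bind(fd, (struct sockaddr *)&sun,
		    offsetof(struct sockaddr_un, sun_path) + 1 + 4) ? -1 : fd;
}
#endif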
82 
83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84 
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched/signal.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <linux/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/freezer.h>
120 #include <linux/file.h>
121 
122 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
123 EXPORT_SYMBOL_GPL(unix_socket_table);
124 DEFINE_SPINLOCK(unix_table_lock);
125 EXPORT_SYMBOL_GPL(unix_table_lock);
126 static atomic_long_t unix_nr_socks;
127 
128 
129 static struct hlist_head *unix_sockets_unbound(void *addr)
130 {
131 	unsigned long hash = (unsigned long)addr;
132 
133 	hash ^= hash >> 16;
134 	hash ^= hash >> 8;
135 	hash %= UNIX_HASH_SIZE;
136 	return &unix_socket_table[UNIX_HASH_SIZE + hash];
137 }
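
/* The table is split in two halves: buckets [0, UNIX_HASH_SIZE) hold bound
 * sockets (abstract names hashed by name, filesystem names by inode number),
 * while buckets [UNIX_HASH_SIZE, 2 * UNIX_HASH_SIZE) hold unbound sockets,
 * hashed by socket address as computed above.
 */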
138 
139 #define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
140 
141 #ifdef CONFIG_SECURITY_NETWORK
142 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
143 {
144 	UNIXCB(skb).secid = scm->secid;
145 }
146 
147 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
148 {
149 	scm->secid = UNIXCB(skb).secid;
150 }
151 
152 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
153 {
154 	return (scm->secid == UNIXCB(skb).secid);
155 }
156 #else
157 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
158 { }
159 
160 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
161 { }
162 
163 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
164 {
165 	return true;
166 }
167 #endif /* CONFIG_SECURITY_NETWORK */
168 
169 /*
170  *  SMP locking strategy:
171  *    the hash table is protected by the spinlock unix_table_lock
172  *    each socket's state is protected by a separate spin lock.
173  */
174 
175 static inline unsigned int unix_hash_fold(__wsum n)
176 {
177 	unsigned int hash = (__force unsigned int)csum_fold(n);
178 
179 	hash ^= hash>>8;
180 	return hash&(UNIX_HASH_SIZE-1);
181 }
182 
183 #define unix_peer(sk) (unix_sk(sk)->peer)
184 
185 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
186 {
187 	return unix_peer(osk) == sk;
188 }
189 
190 static inline int unix_may_send(struct sock *sk, struct sock *osk)
191 {
192 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
193 }
194 
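/* A peer's receive queue counts as full once it holds more skbs than the
 * backlog: sk_max_ack_backlog is set by listen() for stream sockets and
 * initialized from the net.unix.max_dgram_qlen sysctl in unix_create1()
 * for everything else.
 */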
195 static inline int unix_recvq_full(struct sock const *sk)
196 {
197 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
198 }
199 
200 struct sock *unix_peer_get(struct sock *s)
201 {
202 	struct sock *peer;
203 
204 	unix_state_lock(s);
205 	peer = unix_peer(s);
206 	if (peer)
207 		sock_hold(peer);
208 	unix_state_unlock(s);
209 	return peer;
210 }
211 EXPORT_SYMBOL_GPL(unix_peer_get);
212 
213 static inline void unix_release_addr(struct unix_address *addr)
214 {
215 	if (refcount_dec_and_test(&addr->refcnt))
216 		kfree(addr);
217 }
218 
219 /*
220  *	Check unix socket name:
221  *		- it must not be zero length.
222  *		- if it does not start with a zero byte, it must be NUL terminated (FS object)
223  *		- if it starts with a zero byte, it is an abstract name.
224  */
225 
226 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
227 {
228 	if (len <= sizeof(short) || len > sizeof(*sunaddr))
229 		return -EINVAL;
230 	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
231 		return -EINVAL;
232 	if (sunaddr->sun_path[0]) {
233 		/*
234 		 * This may look like an off by one error but it is a bit more
235 		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
236 		 * sun_path[108] doesn't as such exist.  However in kernel space
237 		 * we are guaranteed that it is a valid memory location in our
238 		 * kernel address buffer.
239 		 */
240 		((char *)sunaddr)[len] = 0;
241 		len = strlen(sunaddr->sun_path)+1+sizeof(short);
242 		return len;
243 	}
244 
245 	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
246 	return len;
247 }
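
/* A sketch of the two explicit address forms unix_mkname() accepts, as a
 * caller would build them; the path and name below are hypothetical:
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static void example_names(void)
{
	struct sockaddr_un a = { .sun_family = AF_UNIX };

	/* Filesystem name: sun_path[0] != 0 and NUL terminated; the kernel
	 * recomputes the effective length from strlen(), as seen above.
	 */
	strcpy(a.sun_path, "/tmp/demo.sock");

	/* Abstract name: sun_path[0] == 0; the address length passed to
	 * bind()/connect(), not a terminator, delimits the name, and the
	 * whole byte sequence is hashed by unix_hash_fold().
	 */
	memcpy(a.sun_path, "\0demo", 5);
}
#endif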
248 
249 static void __unix_remove_socket(struct sock *sk)
250 {
251 	sk_del_node_init(sk);
252 }
253 
254 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
255 {
256 	WARN_ON(!sk_unhashed(sk));
257 	sk_add_node(sk, list);
258 }
259 
260 static inline void unix_remove_socket(struct sock *sk)
261 {
262 	spin_lock(&unix_table_lock);
263 	__unix_remove_socket(sk);
264 	spin_unlock(&unix_table_lock);
265 }
266 
267 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
268 {
269 	spin_lock(&unix_table_lock);
270 	__unix_insert_socket(list, sk);
271 	spin_unlock(&unix_table_lock);
272 }
273 
274 static struct sock *__unix_find_socket_byname(struct net *net,
275 					      struct sockaddr_un *sunname,
276 					      int len, int type, unsigned int hash)
277 {
278 	struct sock *s;
279 
280 	sk_for_each(s, &unix_socket_table[hash ^ type]) {
281 		struct unix_sock *u = unix_sk(s);
282 
283 		if (!net_eq(sock_net(s), net))
284 			continue;
285 
286 		if (u->addr->len == len &&
287 		    !memcmp(u->addr->name, sunname, len))
288 			goto found;
289 	}
290 	s = NULL;
291 found:
292 	return s;
293 }
294 
295 static inline struct sock *unix_find_socket_byname(struct net *net,
296 						   struct sockaddr_un *sunname,
297 						   int len, int type,
298 						   unsigned int hash)
299 {
300 	struct sock *s;
301 
302 	spin_lock(&unix_table_lock);
303 	s = __unix_find_socket_byname(net, sunname, len, type, hash);
304 	if (s)
305 		sock_hold(s);
306 	spin_unlock(&unix_table_lock);
307 	return s;
308 }
309 
310 static struct sock *unix_find_socket_byinode(struct inode *i)
311 {
312 	struct sock *s;
313 
314 	spin_lock(&unix_table_lock);
315 	sk_for_each(s,
316 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
317 		struct dentry *dentry = unix_sk(s)->path.dentry;
318 
319 		if (dentry && d_backing_inode(dentry) == i) {
320 			sock_hold(s);
321 			goto found;
322 		}
323 	}
324 	s = NULL;
325 found:
326 	spin_unlock(&unix_table_lock);
327 	return s;
328 }
329 
330 /* Support code for asymmetrically connected dgram sockets
331  *
332  * If a datagram socket is connected to a socket not itself connected
333  * to the first socket (e.g., /dev/log), clients may only enqueue more
334  * messages if the present receive queue of the server socket is not
335  * "too large". This means there's a second writeability condition
336  * poll and sendmsg need to test. The dgram recv code will do a wake
337  * up on the peer_wait wait queue of a socket upon reception of a
338  * datagram which needs to be propagated to sleeping would-be writers
339  * since these might not have sent anything so far. This can't be
340  * accomplished via poll_wait because the lifetime of the server
341  * socket might be less than that of its clients if these break their
342  * association with it or if the server socket is closed while clients
343  * are still connected to it and there's no way to inform "a polling
344  * implementation" that it should let go of a certain wait queue.
345  *
346  * In order to propagate a wake up, a wait_queue_entry_t of the client
347  * socket is enqueued on the peer_wait queue of the server socket
348  * whose wake function does a wake_up on the ordinary client socket
349  * wait queue. This connection is established whenever a write (or
350  * poll for write) hits the flow control condition, and is broken when
351  * the association to the server socket is dissolved or after a wake up
352  * was relayed.
353  */
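
/* A userspace sketch of that scenario: a connected dgram client that hit
 * the peer's queue limit sleeps in poll() until the receiver drains its
 * queue and the wake-up is relayed as described above. The fd is assumed
 * to be a connected SOCK_DGRAM socket; error handling is elided.
 */
#if 0
#include <poll.h>
#include <sys/socket.h>

static void wait_until_peer_drains(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	char msg[64] = "ping";

	/* Fill the receiver's queue: this starts failing with EAGAIN after
	 * max_dgram_qlen undelivered datagrams, long before our own send
	 * buffer is exhausted.
	 */
	while (send(fd, msg, sizeof(msg), MSG_DONTWAIT) >= 0)
		;
	/* Woken via unix_dgram_peer_wake_relay() once the peer reads. */
	poll(&pfd, 1, -1);
}
#endif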
354 
355 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
356 				      void *key)
357 {
358 	struct unix_sock *u;
359 	wait_queue_head_t *u_sleep;
360 
361 	u = container_of(q, struct unix_sock, peer_wake);
362 
363 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
364 			    q);
365 	u->peer_wake.private = NULL;
366 
367 	/* relaying can only happen while the wq still exists */
368 	u_sleep = sk_sleep(&u->sk);
369 	if (u_sleep)
370 		wake_up_interruptible_poll(u_sleep, key);
371 
372 	return 0;
373 }
374 
375 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
376 {
377 	struct unix_sock *u, *u_other;
378 	int rc;
379 
380 	u = unix_sk(sk);
381 	u_other = unix_sk(other);
382 	rc = 0;
383 	spin_lock(&u_other->peer_wait.lock);
384 
385 	if (!u->peer_wake.private) {
386 		u->peer_wake.private = other;
387 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
388 
389 		rc = 1;
390 	}
391 
392 	spin_unlock(&u_other->peer_wait.lock);
393 	return rc;
394 }
395 
396 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
397 					    struct sock *other)
398 {
399 	struct unix_sock *u, *u_other;
400 
401 	u = unix_sk(sk);
402 	u_other = unix_sk(other);
403 	spin_lock(&u_other->peer_wait.lock);
404 
405 	if (u->peer_wake.private == other) {
406 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
407 		u->peer_wake.private = NULL;
408 	}
409 
410 	spin_unlock(&u_other->peer_wait.lock);
411 }
412 
413 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
414 						   struct sock *other)
415 {
416 	unix_dgram_peer_wake_disconnect(sk, other);
417 	wake_up_interruptible_poll(sk_sleep(sk),
418 				   POLLOUT |
419 				   POLLWRNORM |
420 				   POLLWRBAND);
421 }
422 
423 /* preconditions:
424  *	- unix_peer(sk) == other
425  *	- association is stable
426  */
427 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
428 {
429 	int connected;
430 
431 	connected = unix_dgram_peer_wake_connect(sk, other);
432 
433 	if (unix_recvq_full(other))
434 		return 1;
435 
436 	if (connected)
437 		unix_dgram_peer_wake_disconnect(sk, other);
438 
439 	return 0;
440 }
441 
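/* A socket counts as writable while at most a quarter of its send buffer
 * is consumed by queued data, i.e. wmem_alloc * 4 <= sndbuf.
 */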
442 static int unix_writable(const struct sock *sk)
443 {
444 	return sk->sk_state != TCP_LISTEN &&
445 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
446 }
447 
448 static void unix_write_space(struct sock *sk)
449 {
450 	struct socket_wq *wq;
451 
452 	rcu_read_lock();
453 	if (unix_writable(sk)) {
454 		wq = rcu_dereference(sk->sk_wq);
455 		if (skwq_has_sleeper(wq))
456 			wake_up_interruptible_sync_poll(&wq->wait,
457 				POLLOUT | POLLWRNORM | POLLWRBAND);
458 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
459 	}
460 	rcu_read_unlock();
461 }
462 
463 /* When a dgram socket disconnects (or changes its peer), we clear its receive
464  * queue of packets that arrived from the previous peer. First, this allows
465  * flow control based only on wmem_alloc; second, an sk connected to a peer
466  * may receive messages only from that peer. */
467 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
468 {
469 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
470 		skb_queue_purge(&sk->sk_receive_queue);
471 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
472 
473 		/* If one link of a bidirectional dgram pipe is disconnected,
474 		 * we signal an error. Messages are lost. Do not do this
475 		 * when the peer was not connected to us.
476 		 */
477 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
478 			other->sk_err = ECONNRESET;
479 			other->sk_error_report(other);
480 		}
481 	}
482 }
483 
484 static void unix_sock_destructor(struct sock *sk)
485 {
486 	struct unix_sock *u = unix_sk(sk);
487 
488 	skb_queue_purge(&sk->sk_receive_queue);
489 
490 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
491 	WARN_ON(!sk_unhashed(sk));
492 	WARN_ON(sk->sk_socket);
493 	if (!sock_flag(sk, SOCK_DEAD)) {
494 		pr_info("Attempt to release alive unix socket: %p\n", sk);
495 		return;
496 	}
497 
498 	if (u->addr)
499 		unix_release_addr(u->addr);
500 
501 	atomic_long_dec(&unix_nr_socks);
502 	local_bh_disable();
503 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
504 	local_bh_enable();
505 #ifdef UNIX_REFCNT_DEBUG
506 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
507 		atomic_long_read(&unix_nr_socks));
508 #endif
509 }
510 
511 static void unix_release_sock(struct sock *sk, int embrion)
512 {
513 	struct unix_sock *u = unix_sk(sk);
514 	struct path path;
515 	struct sock *skpair;
516 	struct sk_buff *skb;
517 	int state;
518 
519 	unix_remove_socket(sk);
520 
521 	/* Clear state */
522 	unix_state_lock(sk);
523 	sock_orphan(sk);
524 	sk->sk_shutdown = SHUTDOWN_MASK;
525 	path	     = u->path;
526 	u->path.dentry = NULL;
527 	u->path.mnt = NULL;
528 	state = sk->sk_state;
529 	sk->sk_state = TCP_CLOSE;
530 	unix_state_unlock(sk);
531 
532 	wake_up_interruptible_all(&u->peer_wait);
533 
534 	skpair = unix_peer(sk);
535 
536 	if (skpair != NULL) {
537 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
538 			unix_state_lock(skpair);
539 			/* No more writes */
540 			skpair->sk_shutdown = SHUTDOWN_MASK;
541 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
542 				skpair->sk_err = ECONNRESET;
543 			unix_state_unlock(skpair);
544 			skpair->sk_state_change(skpair);
545 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
546 		}
547 
548 		unix_dgram_peer_wake_disconnect(sk, skpair);
549 		sock_put(skpair); /* It may now die */
550 		unix_peer(sk) = NULL;
551 	}
552 
553 	/* Try to flush out this socket. At least throw out the buffers. */
554 
555 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
556 		if (state == TCP_LISTEN)
557 			unix_release_sock(skb->sk, 1);
558 		/* passed fds are erased in the kfree_skb hook	      */
559 		UNIXCB(skb).consumed = skb->len;
560 		kfree_skb(skb);
561 	}
562 
563 	if (path.dentry)
564 		path_put(&path);
565 
566 	sock_put(sk);
567 
568 	/* ---- Socket is dead now and most probably destroyed ---- */
569 
570 	/*
571 	 * Fixme: BSD difference: In BSD all sockets connected to us get
572 	 *	  ECONNRESET and we die on the spot. In Linux we behave
573 	 *	  like files and pipes do and wait for the last
574 	 *	  dereference.
575 	 *
576 	 * Can't we simply set sock->err?
577 	 *
578 	 *	  What the above comment does talk about? --ANK(980817)
579 	 */
580 
581 	if (unix_tot_inflight)
582 		unix_gc();		/* Garbage collect fds */
583 }
584 
585 static void init_peercred(struct sock *sk)
586 {
587 	put_pid(sk->sk_peer_pid);
588 	if (sk->sk_peer_cred)
589 		put_cred(sk->sk_peer_cred);
590 	sk->sk_peer_pid  = get_pid(task_tgid(current));
591 	sk->sk_peer_cred = get_current_cred();
592 }
593 
594 static void copy_peercred(struct sock *sk, struct sock *peersk)
595 {
596 	put_pid(sk->sk_peer_pid);
597 	if (sk->sk_peer_cred)
598 		put_cred(sk->sk_peer_cred);
599 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
600 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
601 }
602 
603 static int unix_listen(struct socket *sock, int backlog)
604 {
605 	int err;
606 	struct sock *sk = sock->sk;
607 	struct unix_sock *u = unix_sk(sk);
608 	struct pid *old_pid = NULL;
609 
610 	err = -EOPNOTSUPP;
611 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
612 		goto out;	/* Only stream/seqpacket sockets accept */
613 	err = -EINVAL;
614 	if (!u->addr)
615 		goto out;	/* No listens on an unbound socket */
616 	unix_state_lock(sk);
617 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
618 		goto out_unlock;
619 	if (backlog > sk->sk_max_ack_backlog)
620 		wake_up_interruptible_all(&u->peer_wait);
621 	sk->sk_max_ack_backlog	= backlog;
622 	sk->sk_state		= TCP_LISTEN;
623 	/* set credentials so connect can copy them */
624 	init_peercred(sk);
625 	err = 0;
626 
627 out_unlock:
628 	unix_state_unlock(sk);
629 	put_pid(old_pid);
630 out:
631 	return err;
632 }
633 
634 static int unix_release(struct socket *);
635 static int unix_bind(struct socket *, struct sockaddr *, int);
636 static int unix_stream_connect(struct socket *, struct sockaddr *,
637 			       int addr_len, int flags);
638 static int unix_socketpair(struct socket *, struct socket *);
639 static int unix_accept(struct socket *, struct socket *, int, bool);
640 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
641 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
642 static unsigned int unix_dgram_poll(struct file *, struct socket *,
643 				    poll_table *);
644 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
645 static int unix_shutdown(struct socket *, int);
646 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
647 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
648 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
649 				    size_t size, int flags);
650 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
651 				       struct pipe_inode_info *, size_t size,
652 				       unsigned int flags);
653 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
654 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
655 static int unix_dgram_connect(struct socket *, struct sockaddr *,
656 			      int, int);
657 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
658 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
659 				  int);
660 
661 static int unix_set_peek_off(struct sock *sk, int val)
662 {
663 	struct unix_sock *u = unix_sk(sk);
664 
665 	if (mutex_lock_interruptible(&u->iolock))
666 		return -EINTR;
667 
668 	sk->sk_peek_off = val;
669 	mutex_unlock(&u->iolock);
670 
671 	return 0;
672 }
673 
674 
675 static const struct proto_ops unix_stream_ops = {
676 	.family =	PF_UNIX,
677 	.owner =	THIS_MODULE,
678 	.release =	unix_release,
679 	.bind =		unix_bind,
680 	.connect =	unix_stream_connect,
681 	.socketpair =	unix_socketpair,
682 	.accept =	unix_accept,
683 	.getname =	unix_getname,
684 	.poll =		unix_poll,
685 	.ioctl =	unix_ioctl,
686 	.listen =	unix_listen,
687 	.shutdown =	unix_shutdown,
688 	.setsockopt =	sock_no_setsockopt,
689 	.getsockopt =	sock_no_getsockopt,
690 	.sendmsg =	unix_stream_sendmsg,
691 	.recvmsg =	unix_stream_recvmsg,
692 	.mmap =		sock_no_mmap,
693 	.sendpage =	unix_stream_sendpage,
694 	.splice_read =	unix_stream_splice_read,
695 	.set_peek_off =	unix_set_peek_off,
696 };
697 
698 static const struct proto_ops unix_dgram_ops = {
699 	.family =	PF_UNIX,
700 	.owner =	THIS_MODULE,
701 	.release =	unix_release,
702 	.bind =		unix_bind,
703 	.connect =	unix_dgram_connect,
704 	.socketpair =	unix_socketpair,
705 	.accept =	sock_no_accept,
706 	.getname =	unix_getname,
707 	.poll =		unix_dgram_poll,
708 	.ioctl =	unix_ioctl,
709 	.listen =	sock_no_listen,
710 	.shutdown =	unix_shutdown,
711 	.setsockopt =	sock_no_setsockopt,
712 	.getsockopt =	sock_no_getsockopt,
713 	.sendmsg =	unix_dgram_sendmsg,
714 	.recvmsg =	unix_dgram_recvmsg,
715 	.mmap =		sock_no_mmap,
716 	.sendpage =	sock_no_sendpage,
717 	.set_peek_off =	unix_set_peek_off,
718 };
719 
720 static const struct proto_ops unix_seqpacket_ops = {
721 	.family =	PF_UNIX,
722 	.owner =	THIS_MODULE,
723 	.release =	unix_release,
724 	.bind =		unix_bind,
725 	.connect =	unix_stream_connect,
726 	.socketpair =	unix_socketpair,
727 	.accept =	unix_accept,
728 	.getname =	unix_getname,
729 	.poll =		unix_dgram_poll,
730 	.ioctl =	unix_ioctl,
731 	.listen =	unix_listen,
732 	.shutdown =	unix_shutdown,
733 	.setsockopt =	sock_no_setsockopt,
734 	.getsockopt =	sock_no_getsockopt,
735 	.sendmsg =	unix_seqpacket_sendmsg,
736 	.recvmsg =	unix_seqpacket_recvmsg,
737 	.mmap =		sock_no_mmap,
738 	.sendpage =	sock_no_sendpage,
739 	.set_peek_off =	unix_set_peek_off,
740 };
741 
742 static struct proto unix_proto = {
743 	.name			= "UNIX",
744 	.owner			= THIS_MODULE,
745 	.obj_size		= sizeof(struct unix_sock),
746 };
747 
748 /*
749  * AF_UNIX sockets do not interact with hardware, hence they
750  * don't trigger interrupts - so it's safe for them to have
751  * bh-unsafe locking for their sk_receive_queue.lock. Split off
752  * this special lock-class by reinitializing the spinlock key:
753  */
754 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
755 
756 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
757 {
758 	struct sock *sk = NULL;
759 	struct unix_sock *u;
760 
761 	atomic_long_inc(&unix_nr_socks);
762 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
763 		goto out;
764 
765 	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
766 	if (!sk)
767 		goto out;
768 
769 	sock_init_data(sock, sk);
770 	lockdep_set_class(&sk->sk_receive_queue.lock,
771 				&af_unix_sk_receive_queue_lock_key);
772 
773 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
774 	sk->sk_write_space	= unix_write_space;
775 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
776 	sk->sk_destruct		= unix_sock_destructor;
777 	u	  = unix_sk(sk);
778 	u->path.dentry = NULL;
779 	u->path.mnt = NULL;
780 	spin_lock_init(&u->lock);
781 	atomic_long_set(&u->inflight, 0);
782 	INIT_LIST_HEAD(&u->link);
783 	mutex_init(&u->iolock); /* single task reading lock */
784 	mutex_init(&u->bindlock); /* single task binding lock */
785 	init_waitqueue_head(&u->peer_wait);
786 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
787 	unix_insert_socket(unix_sockets_unbound(sk), sk);
788 out:
789 	if (sk == NULL)
790 		atomic_long_dec(&unix_nr_socks);
791 	else {
792 		local_bh_disable();
793 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
794 		local_bh_enable();
795 	}
796 	return sk;
797 }
798 
799 static int unix_create(struct net *net, struct socket *sock, int protocol,
800 		       int kern)
801 {
802 	if (protocol && protocol != PF_UNIX)
803 		return -EPROTONOSUPPORT;
804 
805 	sock->state = SS_UNCONNECTED;
806 
807 	switch (sock->type) {
808 	case SOCK_STREAM:
809 		sock->ops = &unix_stream_ops;
810 		break;
811 		/*
812 		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
813 		 *	nothing uses it.
814 		 */
815 	case SOCK_RAW:
816 		sock->type = SOCK_DGRAM;
817 		/* fall through */
818 	case SOCK_DGRAM:
819 		sock->ops = &unix_dgram_ops;
820 		break;
821 	case SOCK_SEQPACKET:
822 		sock->ops = &unix_seqpacket_ops;
823 		break;
824 	default:
825 		return -ESOCKTNOSUPPORT;
826 	}
827 
828 	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
829 }
830 
831 static int unix_release(struct socket *sock)
832 {
833 	struct sock *sk = sock->sk;
834 
835 	if (!sk)
836 		return 0;
837 
838 	unix_release_sock(sk, 0);
839 	sock->sk = NULL;
840 
841 	return 0;
842 }
843 
844 static int unix_autobind(struct socket *sock)
845 {
846 	struct sock *sk = sock->sk;
847 	struct net *net = sock_net(sk);
848 	struct unix_sock *u = unix_sk(sk);
849 	static u32 ordernum = 1;
850 	struct unix_address *addr;
851 	int err;
852 	unsigned int retries = 0;
853 
854 	err = mutex_lock_interruptible(&u->bindlock);
855 	if (err)
856 		return err;
857 
858 	err = 0;
859 	if (u->addr)
860 		goto out;
861 
862 	err = -ENOMEM;
863 	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
864 	if (!addr)
865 		goto out;
866 
867 	addr->name->sun_family = AF_UNIX;
868 	refcount_set(&addr->refcnt, 1);
869 
870 retry:
871 	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
872 	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
873 
874 	spin_lock(&unix_table_lock);
875 	ordernum = (ordernum+1)&0xFFFFF;
876 
877 	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
878 				      addr->hash)) {
879 		spin_unlock(&unix_table_lock);
880 		/*
881 		 * __unix_find_socket_byname() may take a long time if many names
882 		 * are already in use.
883 		 */
884 		cond_resched();
885 		/* Give up if all names seem to be in use. */
886 		if (retries++ == 0xFFFFF) {
887 			err = -ENOSPC;
888 			kfree(addr);
889 			goto out;
890 		}
891 		goto retry;
892 	}
893 	addr->hash ^= sk->sk_type;
894 
895 	__unix_remove_socket(sk);
896 	u->addr = addr;
897 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
898 	spin_unlock(&unix_table_lock);
899 	err = 0;
900 
901 out:	mutex_unlock(&u->bindlock);
902 	return err;
903 }
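
/* Autobind seen from userspace: binding with only sa_family set assigns a
 * unique five-hex-digit abstract name, visible via getsockname(). A minimal
 * sketch, error handling elided:
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

static void autobind_demo(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	socklen_t len = sizeof(sun);
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
	getsockname(fd, (struct sockaddr *)&sun, &len);
	/* sun_path now holds "\0XXXXX", conventionally printed as @XXXXX */
	printf("bound to @%.5s\n", sun.sun_path + 1);
}
#endif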
904 
905 static struct sock *unix_find_other(struct net *net,
906 				    struct sockaddr_un *sunname, int len,
907 				    int type, unsigned int hash, int *error)
908 {
909 	struct sock *u;
910 	struct path path;
911 	int err = 0;
912 
913 	if (sunname->sun_path[0]) {
914 		struct inode *inode;
915 		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
916 		if (err)
917 			goto fail;
918 		inode = d_backing_inode(path.dentry);
919 		err = inode_permission(inode, MAY_WRITE);
920 		if (err)
921 			goto put_fail;
922 
923 		err = -ECONNREFUSED;
924 		if (!S_ISSOCK(inode->i_mode))
925 			goto put_fail;
926 		u = unix_find_socket_byinode(inode);
927 		if (!u)
928 			goto put_fail;
929 
930 		if (u->sk_type == type)
931 			touch_atime(&path);
932 
933 		path_put(&path);
934 
935 		err = -EPROTOTYPE;
936 		if (u->sk_type != type) {
937 			sock_put(u);
938 			goto fail;
939 		}
940 	} else {
941 		err = -ECONNREFUSED;
942 		u = unix_find_socket_byname(net, sunname, len, type, hash);
943 		if (u) {
944 			struct dentry *dentry;
945 			dentry = unix_sk(u)->path.dentry;
946 			if (dentry)
947 				touch_atime(&unix_sk(u)->path);
948 		} else
949 			goto fail;
950 	}
951 	return u;
952 
953 put_fail:
954 	path_put(&path);
955 fail:
956 	*error = err;
957 	return NULL;
958 }
959 
960 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
961 {
962 	struct dentry *dentry;
963 	struct path path;
964 	int err = 0;
965 	/*
966 	 * Get the parent directory and calculate the hash for the last
967 	 * component.
968 	 */
969 	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
970 	err = PTR_ERR(dentry);
971 	if (IS_ERR(dentry))
972 		return err;
973 
974 	/*
975 	 * All right, let's create it.
976 	 */
977 	err = security_path_mknod(&path, dentry, mode, 0);
978 	if (!err) {
979 		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
980 		if (!err) {
981 			res->mnt = mntget(path.mnt);
982 			res->dentry = dget(dentry);
983 		}
984 	}
985 	done_path_create(&path, dentry);
986 	return err;
987 }
988 
989 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
990 {
991 	struct sock *sk = sock->sk;
992 	struct net *net = sock_net(sk);
993 	struct unix_sock *u = unix_sk(sk);
994 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
995 	char *sun_path = sunaddr->sun_path;
996 	int err;
997 	unsigned int hash;
998 	struct unix_address *addr;
999 	struct hlist_head *list;
1000 	struct path path = { };
1001 
1002 	err = -EINVAL;
1003 	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1004 	    sunaddr->sun_family != AF_UNIX)
1005 		goto out;
1006 
1007 	if (addr_len == sizeof(short)) {
1008 		err = unix_autobind(sock);
1009 		goto out;
1010 	}
1011 
1012 	err = unix_mkname(sunaddr, addr_len, &hash);
1013 	if (err < 0)
1014 		goto out;
1015 	addr_len = err;
1016 
1017 	if (sun_path[0]) {
1018 		umode_t mode = S_IFSOCK |
1019 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
1020 		err = unix_mknod(sun_path, mode, &path);
1021 		if (err) {
1022 			if (err == -EEXIST)
1023 				err = -EADDRINUSE;
1024 			goto out;
1025 		}
1026 	}
1027 
1028 	err = mutex_lock_interruptible(&u->bindlock);
1029 	if (err)
1030 		goto out_put;
1031 
1032 	err = -EINVAL;
1033 	if (u->addr)
1034 		goto out_up;
1035 
1036 	err = -ENOMEM;
1037 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1038 	if (!addr)
1039 		goto out_up;
1040 
1041 	memcpy(addr->name, sunaddr, addr_len);
1042 	addr->len = addr_len;
1043 	addr->hash = hash ^ sk->sk_type;
1044 	refcount_set(&addr->refcnt, 1);
1045 
1046 	if (sun_path[0]) {
1047 		addr->hash = UNIX_HASH_SIZE;
1048 		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1049 		spin_lock(&unix_table_lock);
1050 		u->path = path;
1051 		list = &unix_socket_table[hash];
1052 	} else {
1053 		spin_lock(&unix_table_lock);
1054 		err = -EADDRINUSE;
1055 		if (__unix_find_socket_byname(net, sunaddr, addr_len,
1056 					      sk->sk_type, hash)) {
1057 			unix_release_addr(addr);
1058 			goto out_unlock;
1059 		}
1060 
1061 		list = &unix_socket_table[addr->hash];
1062 	}
1063 
1064 	err = 0;
1065 	__unix_remove_socket(sk);
1066 	u->addr = addr;
1067 	__unix_insert_socket(list, sk);
1068 
1069 out_unlock:
1070 	spin_unlock(&unix_table_lock);
1071 out_up:
1072 	mutex_unlock(&u->bindlock);
1073 out_put:
1074 	if (err)
1075 		path_put(&path);
1076 out:
1077 	return err;
1078 }
1079 
1080 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1081 {
1082 	if (unlikely(sk1 == sk2) || !sk2) {
1083 		unix_state_lock(sk1);
1084 		return;
1085 	}
1086 	if (sk1 < sk2) {
1087 		unix_state_lock(sk1);
1088 		unix_state_lock_nested(sk2);
1089 	} else {
1090 		unix_state_lock(sk2);
1091 		unix_state_lock_nested(sk1);
1092 	}
1093 }
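
/* The two state locks are always taken in ascending address order, so two
 * tasks double-locking the same pair of sockets from opposite directions
 * cannot deadlock; sk1 == sk2 and sk2 == NULL degenerate to a single lock.
 */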
1094 
1095 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1096 {
1097 	if (unlikely(sk1 == sk2) || !sk2) {
1098 		unix_state_unlock(sk1);
1099 		return;
1100 	}
1101 	unix_state_unlock(sk1);
1102 	unix_state_unlock(sk2);
1103 }
1104 
1105 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1106 			      int alen, int flags)
1107 {
1108 	struct sock *sk = sock->sk;
1109 	struct net *net = sock_net(sk);
1110 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1111 	struct sock *other;
1112 	unsigned int hash;
1113 	int err;
1114 
1115 	err = -EINVAL;
1116 	if (alen < offsetofend(struct sockaddr, sa_family))
1117 		goto out;
1118 
1119 	if (addr->sa_family != AF_UNSPEC) {
1120 		err = unix_mkname(sunaddr, alen, &hash);
1121 		if (err < 0)
1122 			goto out;
1123 		alen = err;
1124 
1125 		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1126 		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1127 			goto out;
1128 
1129 restart:
1130 		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1131 		if (!other)
1132 			goto out;
1133 
1134 		unix_state_double_lock(sk, other);
1135 
1136 		/* Apparently VFS overslept socket death. Retry. */
1137 		if (sock_flag(other, SOCK_DEAD)) {
1138 			unix_state_double_unlock(sk, other);
1139 			sock_put(other);
1140 			goto restart;
1141 		}
1142 
1143 		err = -EPERM;
1144 		if (!unix_may_send(sk, other))
1145 			goto out_unlock;
1146 
1147 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1148 		if (err)
1149 			goto out_unlock;
1150 
1151 	} else {
1152 		/*
1153 		 *	1003.1g breaking connected state with AF_UNSPEC
1154 		 */
1155 		other = NULL;
1156 		unix_state_double_lock(sk, other);
1157 	}
1158 
1159 	/*
1160 	 * If it was connected, reconnect.
1161 	 */
1162 	if (unix_peer(sk)) {
1163 		struct sock *old_peer = unix_peer(sk);
1164 		unix_peer(sk) = other;
1165 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1166 
1167 		unix_state_double_unlock(sk, other);
1168 
1169 		if (other != old_peer)
1170 			unix_dgram_disconnected(sk, old_peer);
1171 		sock_put(old_peer);
1172 	} else {
1173 		unix_peer(sk) = other;
1174 		unix_state_double_unlock(sk, other);
1175 	}
1176 	return 0;
1177 
1178 out_unlock:
1179 	unix_state_double_unlock(sk, other);
1180 	sock_put(other);
1181 out:
1182 	return err;
1183 }
1184 
1185 static long unix_wait_for_peer(struct sock *other, long timeo)
1186 {
1187 	struct unix_sock *u = unix_sk(other);
1188 	int sched;
1189 	DEFINE_WAIT(wait);
1190 
1191 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1192 
1193 	sched = !sock_flag(other, SOCK_DEAD) &&
1194 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1195 		unix_recvq_full(other);
1196 
1197 	unix_state_unlock(other);
1198 
1199 	if (sched)
1200 		timeo = schedule_timeout(timeo);
1201 
1202 	finish_wait(&u->peer_wait, &wait);
1203 	return timeo;
1204 }
1205 
1206 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1207 			       int addr_len, int flags)
1208 {
1209 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1210 	struct sock *sk = sock->sk;
1211 	struct net *net = sock_net(sk);
1212 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1213 	struct sock *newsk = NULL;
1214 	struct sock *other = NULL;
1215 	struct sk_buff *skb = NULL;
1216 	unsigned int hash;
1217 	int st;
1218 	int err;
1219 	long timeo;
1220 
1221 	err = unix_mkname(sunaddr, addr_len, &hash);
1222 	if (err < 0)
1223 		goto out;
1224 	addr_len = err;
1225 
1226 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1227 	    (err = unix_autobind(sock)) != 0)
1228 		goto out;
1229 
1230 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1231 
1232 	/* First of all allocate resources.
1233 	   If we do it after the state is locked,
1234 	   we will have to recheck everything again in any case.
1235 	 */
1236 
1237 	err = -ENOMEM;
1238 
1239 	/* create new sock for complete connection */
1240 	newsk = unix_create1(sock_net(sk), NULL, 0);
1241 	if (newsk == NULL)
1242 		goto out;
1243 
1244 	/* Allocate skb for sending to listening sock */
1245 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1246 	if (skb == NULL)
1247 		goto out;
1248 
1249 restart:
1250 	/*  Find listening sock. */
1251 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1252 	if (!other)
1253 		goto out;
1254 
1255 	/* Latch state of peer */
1256 	unix_state_lock(other);
1257 
1258 	/* Apparently VFS overslept socket death. Retry. */
1259 	if (sock_flag(other, SOCK_DEAD)) {
1260 		unix_state_unlock(other);
1261 		sock_put(other);
1262 		goto restart;
1263 	}
1264 
1265 	err = -ECONNREFUSED;
1266 	if (other->sk_state != TCP_LISTEN)
1267 		goto out_unlock;
1268 	if (other->sk_shutdown & RCV_SHUTDOWN)
1269 		goto out_unlock;
1270 
1271 	if (unix_recvq_full(other)) {
1272 		err = -EAGAIN;
1273 		if (!timeo)
1274 			goto out_unlock;
1275 
1276 		timeo = unix_wait_for_peer(other, timeo);
1277 
1278 		err = sock_intr_errno(timeo);
1279 		if (signal_pending(current))
1280 			goto out;
1281 		sock_put(other);
1282 		goto restart;
1283 	}
1284 
1285 	/* Latch our state.
1286 
1287 	   This is a tricky place. We need to grab our state lock and cannot
1288 	   drop the lock on the peer. It is dangerous because deadlock is
1289 	   possible. The connect-to-self case and simultaneous
1290 	   attempts to connect are eliminated by checking the socket
1291 	   state. other is TCP_LISTEN; if sk is TCP_LISTEN too, we
1292 	   check this before attempting to grab the lock.
1293 
1294 	   Well, and we have to recheck the state after the socket is locked.
1295 	 */
1296 	st = sk->sk_state;
1297 
1298 	switch (st) {
1299 	case TCP_CLOSE:
1300 		/* This is ok... continue with connect */
1301 		break;
1302 	case TCP_ESTABLISHED:
1303 		/* Socket is already connected */
1304 		err = -EISCONN;
1305 		goto out_unlock;
1306 	default:
1307 		err = -EINVAL;
1308 		goto out_unlock;
1309 	}
1310 
1311 	unix_state_lock_nested(sk);
1312 
1313 	if (sk->sk_state != st) {
1314 		unix_state_unlock(sk);
1315 		unix_state_unlock(other);
1316 		sock_put(other);
1317 		goto restart;
1318 	}
1319 
1320 	err = security_unix_stream_connect(sk, other, newsk);
1321 	if (err) {
1322 		unix_state_unlock(sk);
1323 		goto out_unlock;
1324 	}
1325 
1326 	/* The way is open! Quickly set all the necessary fields... */
1327 
1328 	sock_hold(sk);
1329 	unix_peer(newsk)	= sk;
1330 	newsk->sk_state		= TCP_ESTABLISHED;
1331 	newsk->sk_type		= sk->sk_type;
1332 	init_peercred(newsk);
1333 	newu = unix_sk(newsk);
1334 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1335 	otheru = unix_sk(other);
1336 
1337 	/* copy address information from listening to new sock */
1338 	if (otheru->addr) {
1339 		refcount_inc(&otheru->addr->refcnt);
1340 		newu->addr = otheru->addr;
1341 	}
1342 	if (otheru->path.dentry) {
1343 		path_get(&otheru->path);
1344 		newu->path = otheru->path;
1345 	}
1346 
1347 	/* Set credentials */
1348 	copy_peercred(sk, other);
1349 
1350 	sock->state	= SS_CONNECTED;
1351 	sk->sk_state	= TCP_ESTABLISHED;
1352 	sock_hold(newsk);
1353 
1354 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1355 	unix_peer(sk)	= newsk;
1356 
1357 	unix_state_unlock(sk);
1358 
1359 	/* take ten and send info to the listening sock */
1360 	spin_lock(&other->sk_receive_queue.lock);
1361 	__skb_queue_tail(&other->sk_receive_queue, skb);
1362 	spin_unlock(&other->sk_receive_queue.lock);
1363 	unix_state_unlock(other);
1364 	other->sk_data_ready(other);
1365 	sock_put(other);
1366 	return 0;
1367 
1368 out_unlock:
1369 	if (other)
1370 		unix_state_unlock(other);
1371 
1372 out:
1373 	kfree_skb(skb);
1374 	if (newsk)
1375 		unix_release_sock(newsk, 0);
1376 	if (other)
1377 		sock_put(other);
1378 	return err;
1379 }
1380 
1381 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1382 {
1383 	struct sock *ska = socka->sk, *skb = sockb->sk;
1384 
1385 	/* Join our sockets back to back */
1386 	sock_hold(ska);
1387 	sock_hold(skb);
1388 	unix_peer(ska) = skb;
1389 	unix_peer(skb) = ska;
1390 	init_peercred(ska);
1391 	init_peercred(skb);
1392 
1393 	if (ska->sk_type != SOCK_DGRAM) {
1394 		ska->sk_state = TCP_ESTABLISHED;
1395 		skb->sk_state = TCP_ESTABLISHED;
1396 		socka->state  = SS_CONNECTED;
1397 		sockb->state  = SS_CONNECTED;
1398 	}
1399 	return 0;
1400 }
1401 
1402 static void unix_sock_inherit_flags(const struct socket *old,
1403 				    struct socket *new)
1404 {
1405 	if (test_bit(SOCK_PASSCRED, &old->flags))
1406 		set_bit(SOCK_PASSCRED, &new->flags);
1407 	if (test_bit(SOCK_PASSSEC, &old->flags))
1408 		set_bit(SOCK_PASSSEC, &new->flags);
1409 }
1410 
1411 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1412 		       bool kern)
1413 {
1414 	struct sock *sk = sock->sk;
1415 	struct sock *tsk;
1416 	struct sk_buff *skb;
1417 	int err;
1418 
1419 	err = -EOPNOTSUPP;
1420 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1421 		goto out;
1422 
1423 	err = -EINVAL;
1424 	if (sk->sk_state != TCP_LISTEN)
1425 		goto out;
1426 
1427 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1428 	 * so that no locks are necessary.
1429 	 */
1430 
1431 	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1432 	if (!skb) {
1433 		/* This means receive shutdown. */
1434 		if (err == 0)
1435 			err = -EINVAL;
1436 		goto out;
1437 	}
1438 
1439 	tsk = skb->sk;
1440 	skb_free_datagram(sk, skb);
1441 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1442 
1443 	/* attach accepted sock to socket */
1444 	unix_state_lock(tsk);
1445 	newsock->state = SS_CONNECTED;
1446 	unix_sock_inherit_flags(sock, newsock);
1447 	sock_graft(tsk, newsock);
1448 	unix_state_unlock(tsk);
1449 	return 0;
1450 
1451 out:
1452 	return err;
1453 }
1454 
1455 
1456 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1457 {
1458 	struct sock *sk = sock->sk;
1459 	struct unix_sock *u;
1460 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1461 	int err = 0;
1462 
1463 	if (peer) {
1464 		sk = unix_peer_get(sk);
1465 
1466 		err = -ENOTCONN;
1467 		if (!sk)
1468 			goto out;
1469 		err = 0;
1470 	} else {
1471 		sock_hold(sk);
1472 	}
1473 
1474 	u = unix_sk(sk);
1475 	unix_state_lock(sk);
1476 	if (!u->addr) {
1477 		sunaddr->sun_family = AF_UNIX;
1478 		sunaddr->sun_path[0] = 0;
1479 		*uaddr_len = sizeof(short);
1480 	} else {
1481 		struct unix_address *addr = u->addr;
1482 
1483 		*uaddr_len = addr->len;
1484 		memcpy(sunaddr, addr->name, *uaddr_len);
1485 	}
1486 	unix_state_unlock(sk);
1487 	sock_put(sk);
1488 out:
1489 	return err;
1490 }
1491 
1492 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1493 {
1494 	int i;
1495 
1496 	scm->fp = UNIXCB(skb).fp;
1497 	UNIXCB(skb).fp = NULL;
1498 
1499 	for (i = scm->fp->count-1; i >= 0; i--)
1500 		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1501 }
1502 
1503 static void unix_destruct_scm(struct sk_buff *skb)
1504 {
1505 	struct scm_cookie scm;
1506 	memset(&scm, 0, sizeof(scm));
1507 	scm.pid  = UNIXCB(skb).pid;
1508 	if (UNIXCB(skb).fp)
1509 		unix_detach_fds(&scm, skb);
1510 
1511 	/* Alas, it calls VFS */
1512 	/* So fscking what? fput() had been SMP-safe since the last Summer */
1513 	scm_destroy(&scm);
1514 	sock_wfree(skb);
1515 }
1516 
1517 /*
1518  * The "user->unix_inflight" variable is protected by the garbage
1519  * collection lock, and we just read it locklessly here. If you go
1520  * over the limit, there might be a tiny race in actually noticing
1521  * it across threads. Tough.
1522  */
1523 static inline bool too_many_unix_fds(struct task_struct *p)
1524 {
1525 	struct user_struct *user = current_user();
1526 
1527 	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
1528 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1529 	return false;
1530 }
1531 
1532 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1533 {
1534 	int i;
1535 
1536 	if (too_many_unix_fds(current))
1537 		return -ETOOMANYREFS;
1538 
1539 	/*
1540 	 * Need to duplicate file references for the sake of garbage
1541 	 * collection.  Otherwise a socket in the fps might become a
1542 	 * candidate for GC while the skb is not yet queued.
1543 	 */
1544 	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1545 	if (!UNIXCB(skb).fp)
1546 		return -ENOMEM;
1547 
1548 	for (i = scm->fp->count - 1; i >= 0; i--)
1549 		unix_inflight(scm->fp->user, scm->fp->fp[i]);
1550 	return 0;
1551 }
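
/* The sending side that unix_attach_fds() backs, sketched from userspace:
 * passing one file descriptor with SCM_RIGHTS, error handling elided.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void send_fd(int sock, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	char ctl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl, .msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));	/* the fd travels here */
	sendmsg(sock, &msg, 0);
}
#endif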
1552 
1553 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1554 {
1555 	int err = 0;
1556 
1557 	UNIXCB(skb).pid  = get_pid(scm->pid);
1558 	UNIXCB(skb).uid = scm->creds.uid;
1559 	UNIXCB(skb).gid = scm->creds.gid;
1560 	UNIXCB(skb).fp = NULL;
1561 	unix_get_secdata(scm, skb);
1562 	if (scm->fp && send_fds)
1563 		err = unix_attach_fds(scm, skb);
1564 
1565 	skb->destructor = unix_destruct_scm;
1566 	return err;
1567 }
1568 
1569 static bool unix_passcred_enabled(const struct socket *sock,
1570 				  const struct sock *other)
1571 {
1572 	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1573 	       !other->sk_socket ||
1574 	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1575 }
1576 
1577 /*
1578  * Some apps rely on write() giving SCM_CREDENTIALS.
1579  * We include credentials if the source or destination socket
1580  * asserted SOCK_PASSCRED.
1581  */
1582 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1583 			    const struct sock *other)
1584 {
1585 	if (UNIXCB(skb).pid)
1586 		return;
1587 	if (unix_passcred_enabled(sock, other)) {
1588 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1589 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1590 	}
1591 }
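
/* The receiving side of the credentials above, sketched from userspace:
 * with SO_PASSCRED set, every message carries SCM_CREDENTIALS (struct
 * ucred needs _GNU_SOURCE on glibc; error handling elided).
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void read_peer_creds(int sock)
{
	char data[128], ctl[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl, .msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
	recvmsg(sock, &msg, 0);
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			struct ucred uc;

			memcpy(&uc, CMSG_DATA(cmsg), sizeof(uc));
			/* uc.pid, uc.uid and uc.gid identify the sender */
		}
	}
}
#endif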
1592 
1593 static int maybe_init_creds(struct scm_cookie *scm,
1594 			    struct socket *socket,
1595 			    const struct sock *other)
1596 {
1597 	int err;
1598 	struct msghdr msg = { .msg_controllen = 0 };
1599 
1600 	err = scm_send(socket, &msg, scm, false);
1601 	if (err)
1602 		return err;
1603 
1604 	if (unix_passcred_enabled(socket, other)) {
1605 		scm->pid = get_pid(task_tgid(current));
1606 		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1607 	}
1608 	return err;
1609 }
1610 
1611 static bool unix_skb_scm_eq(struct sk_buff *skb,
1612 			    struct scm_cookie *scm)
1613 {
1614 	const struct unix_skb_parms *u = &UNIXCB(skb);
1615 
1616 	return u->pid == scm->pid &&
1617 	       uid_eq(u->uid, scm->creds.uid) &&
1618 	       gid_eq(u->gid, scm->creds.gid) &&
1619 	       unix_secdata_eq(scm, skb);
1620 }
1621 
1622 /*
1623  *	Send AF_UNIX data.
1624  */
1625 
1626 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1627 			      size_t len)
1628 {
1629 	struct sock *sk = sock->sk;
1630 	struct net *net = sock_net(sk);
1631 	struct unix_sock *u = unix_sk(sk);
1632 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1633 	struct sock *other = NULL;
1634 	int namelen = 0; /* fake GCC */
1635 	int err;
1636 	unsigned int hash;
1637 	struct sk_buff *skb;
1638 	long timeo;
1639 	struct scm_cookie scm;
1640 	int data_len = 0;
1641 	int sk_locked;
1642 
1643 	wait_for_unix_gc();
1644 	err = scm_send(sock, msg, &scm, false);
1645 	if (err < 0)
1646 		return err;
1647 
1648 	err = -EOPNOTSUPP;
1649 	if (msg->msg_flags&MSG_OOB)
1650 		goto out;
1651 
1652 	if (msg->msg_namelen) {
1653 		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1654 		if (err < 0)
1655 			goto out;
1656 		namelen = err;
1657 	} else {
1658 		sunaddr = NULL;
1659 		err = -ENOTCONN;
1660 		other = unix_peer_get(sk);
1661 		if (!other)
1662 			goto out;
1663 	}
1664 
1665 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1666 	    && (err = unix_autobind(sock)) != 0)
1667 		goto out;
1668 
1669 	err = -EMSGSIZE;
1670 	if (len > sk->sk_sndbuf - 32)
1671 		goto out;
1672 
1673 	if (len > SKB_MAX_ALLOC) {
1674 		data_len = min_t(size_t,
1675 				 len - SKB_MAX_ALLOC,
1676 				 MAX_SKB_FRAGS * PAGE_SIZE);
1677 		data_len = PAGE_ALIGN(data_len);
1678 
1679 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1680 	}
1681 
1682 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1683 				   msg->msg_flags & MSG_DONTWAIT, &err,
1684 				   PAGE_ALLOC_COSTLY_ORDER);
1685 	if (skb == NULL)
1686 		goto out;
1687 
1688 	err = unix_scm_to_skb(&scm, skb, true);
1689 	if (err < 0)
1690 		goto out_free;
1691 
1692 	skb_put(skb, len - data_len);
1693 	skb->data_len = data_len;
1694 	skb->len = len;
1695 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1696 	if (err)
1697 		goto out_free;
1698 
1699 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1700 
1701 restart:
1702 	if (!other) {
1703 		err = -ECONNRESET;
1704 		if (sunaddr == NULL)
1705 			goto out_free;
1706 
1707 		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1708 					hash, &err);
1709 		if (other == NULL)
1710 			goto out_free;
1711 	}
1712 
1713 	if (sk_filter(other, skb) < 0) {
1714 		/* Toss the packet but do not return any error to the sender */
1715 		err = len;
1716 		goto out_free;
1717 	}
1718 
1719 	sk_locked = 0;
1720 	unix_state_lock(other);
1721 restart_locked:
1722 	err = -EPERM;
1723 	if (!unix_may_send(sk, other))
1724 		goto out_unlock;
1725 
1726 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
1727 		/*
1728 		 *	Check with 1003.1g - what should a
1729 		 *	datagram error return here?
1730 		 */
1731 		unix_state_unlock(other);
1732 		sock_put(other);
1733 
1734 		if (!sk_locked)
1735 			unix_state_lock(sk);
1736 
1737 		err = 0;
1738 		if (unix_peer(sk) == other) {
1739 			unix_peer(sk) = NULL;
1740 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1741 
1742 			unix_state_unlock(sk);
1743 
1744 			unix_dgram_disconnected(sk, other);
1745 			sock_put(other);
1746 			err = -ECONNREFUSED;
1747 		} else {
1748 			unix_state_unlock(sk);
1749 		}
1750 
1751 		other = NULL;
1752 		if (err)
1753 			goto out_free;
1754 		goto restart;
1755 	}
1756 
1757 	err = -EPIPE;
1758 	if (other->sk_shutdown & RCV_SHUTDOWN)
1759 		goto out_unlock;
1760 
1761 	if (sk->sk_type != SOCK_SEQPACKET) {
1762 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1763 		if (err)
1764 			goto out_unlock;
1765 	}
1766 
1767 	/* other == sk && unix_peer(other) != sk if
1768 	 * - unix_peer(sk) == NULL, destination address bound to sk
1769 	 * - unix_peer(sk) == sk by time of get but disconnected before lock
1770 	 */
1771 	if (other != sk &&
1772 	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1773 		if (timeo) {
1774 			timeo = unix_wait_for_peer(other, timeo);
1775 
1776 			err = sock_intr_errno(timeo);
1777 			if (signal_pending(current))
1778 				goto out_free;
1779 
1780 			goto restart;
1781 		}
1782 
1783 		if (!sk_locked) {
1784 			unix_state_unlock(other);
1785 			unix_state_double_lock(sk, other);
1786 		}
1787 
1788 		if (unix_peer(sk) != other ||
1789 		    unix_dgram_peer_wake_me(sk, other)) {
1790 			err = -EAGAIN;
1791 			sk_locked = 1;
1792 			goto out_unlock;
1793 		}
1794 
1795 		if (!sk_locked) {
1796 			sk_locked = 1;
1797 			goto restart_locked;
1798 		}
1799 	}
1800 
1801 	if (unlikely(sk_locked))
1802 		unix_state_unlock(sk);
1803 
1804 	if (sock_flag(other, SOCK_RCVTSTAMP))
1805 		__net_timestamp(skb);
1806 	maybe_add_creds(skb, sock, other);
1807 	skb_queue_tail(&other->sk_receive_queue, skb);
1808 	unix_state_unlock(other);
1809 	other->sk_data_ready(other);
1810 	sock_put(other);
1811 	scm_destroy(&scm);
1812 	return len;
1813 
1814 out_unlock:
1815 	if (sk_locked)
1816 		unix_state_unlock(sk);
1817 	unix_state_unlock(other);
1818 out_free:
1819 	kfree_skb(skb);
1820 out:
1821 	if (other)
1822 		sock_put(other);
1823 	scm_destroy(&scm);
1824 	return err;
1825 }
1826 
1827 /* We use paged skbs for stream sockets, and limit occupancy to 32768
1828  * bytes, and a minimum of a full page.
1829  */
1830 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
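
/* With 4 KiB pages, get_order(32768) == 3, so each stream skb may carry up
 * to 32 KiB of page fragments on top of its linear SKB_MAX_HEAD(0) part.
 */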
1831 
1832 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1833 			       size_t len)
1834 {
1835 	struct sock *sk = sock->sk;
1836 	struct sock *other = NULL;
1837 	int err, size;
1838 	struct sk_buff *skb;
1839 	int sent = 0;
1840 	struct scm_cookie scm;
1841 	bool fds_sent = false;
1842 	int data_len;
1843 
1844 	wait_for_unix_gc();
1845 	err = scm_send(sock, msg, &scm, false);
1846 	if (err < 0)
1847 		return err;
1848 
1849 	err = -EOPNOTSUPP;
1850 	if (msg->msg_flags&MSG_OOB)
1851 		goto out_err;
1852 
1853 	if (msg->msg_namelen) {
1854 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1855 		goto out_err;
1856 	} else {
1857 		err = -ENOTCONN;
1858 		other = unix_peer(sk);
1859 		if (!other)
1860 			goto out_err;
1861 	}
1862 
1863 	if (sk->sk_shutdown & SEND_SHUTDOWN)
1864 		goto pipe_err;
1865 
1866 	while (sent < len) {
1867 		size = len - sent;
1868 
1869 		/* Keep two messages in the pipe so it schedules better */
1870 		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1871 
1872 		/* allow fallback to order-0 allocations */
1873 		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1874 
1875 		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1876 
1877 		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1878 
1879 		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1880 					   msg->msg_flags & MSG_DONTWAIT, &err,
1881 					   get_order(UNIX_SKB_FRAGS_SZ));
1882 		if (!skb)
1883 			goto out_err;
1884 
1885 		/* Only send the fds in the first buffer */
1886 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
1887 		if (err < 0) {
1888 			kfree_skb(skb);
1889 			goto out_err;
1890 		}
1891 		fds_sent = true;
1892 
1893 		skb_put(skb, size - data_len);
1894 		skb->data_len = data_len;
1895 		skb->len = size;
1896 		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1897 		if (err) {
1898 			kfree_skb(skb);
1899 			goto out_err;
1900 		}
1901 
1902 		unix_state_lock(other);
1903 
1904 		if (sock_flag(other, SOCK_DEAD) ||
1905 		    (other->sk_shutdown & RCV_SHUTDOWN))
1906 			goto pipe_err_free;
1907 
1908 		maybe_add_creds(skb, sock, other);
1909 		skb_queue_tail(&other->sk_receive_queue, skb);
1910 		unix_state_unlock(other);
1911 		other->sk_data_ready(other);
1912 		sent += size;
1913 	}
1914 
1915 	scm_destroy(&scm);
1916 
1917 	return sent;
1918 
1919 pipe_err_free:
1920 	unix_state_unlock(other);
1921 	kfree_skb(skb);
1922 pipe_err:
1923 	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1924 		send_sig(SIGPIPE, current, 0);
1925 	err = -EPIPE;
1926 out_err:
1927 	scm_destroy(&scm);
1928 	return sent ? : err;
1929 }
1930 
1931 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1932 				    int offset, size_t size, int flags)
1933 {
1934 	int err;
1935 	bool send_sigpipe = false;
1936 	bool init_scm = true;
1937 	struct scm_cookie scm;
1938 	struct sock *other, *sk = socket->sk;
1939 	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1940 
1941 	if (flags & MSG_OOB)
1942 		return -EOPNOTSUPP;
1943 
1944 	other = unix_peer(sk);
1945 	if (!other || sk->sk_state != TCP_ESTABLISHED)
1946 		return -ENOTCONN;
1947 
1948 	if (false) {
1949 alloc_skb:
1950 		unix_state_unlock(other);
1951 		mutex_unlock(&unix_sk(other)->iolock);
1952 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1953 					      &err, 0);
1954 		if (!newskb)
1955 			goto err;
1956 	}
1957 
1958 	/* we must acquire iolock as we modify already present
1959 	 * skbs in the sk_receive_queue and mess with skb->len
1960 	 */
1961 	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
1962 	if (err) {
1963 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1964 		goto err;
1965 	}
1966 
1967 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1968 		err = -EPIPE;
1969 		send_sigpipe = true;
1970 		goto err_unlock;
1971 	}
1972 
1973 	unix_state_lock(other);
1974 
1975 	if (sock_flag(other, SOCK_DEAD) ||
1976 	    other->sk_shutdown & RCV_SHUTDOWN) {
1977 		err = -EPIPE;
1978 		send_sigpipe = true;
1979 		goto err_state_unlock;
1980 	}
1981 
1982 	if (init_scm) {
1983 		err = maybe_init_creds(&scm, socket, other);
1984 		if (err)
1985 			goto err_state_unlock;
1986 		init_scm = false;
1987 	}
1988 
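	/* Reuse the queue tail if we wrote it under the same scm
	 * credentials; otherwise (empty queue, foreign tail, or a tail
	 * whose page fragments are already full) fall back to newskb.
	 */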
1989 	skb = skb_peek_tail(&other->sk_receive_queue);
1990 	if (tail && tail == skb) {
1991 		skb = newskb;
1992 	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1993 		if (newskb) {
1994 			skb = newskb;
1995 		} else {
1996 			tail = skb;
1997 			goto alloc_skb;
1998 		}
1999 	} else if (newskb) {
2000 		/* Fast path: the queue tail was written by us under the
2001 		 * same credentials, so drop the speculatively allocated
2002 		 * newskb; consume_skb() is harmless even if it were NULL.
2003 		 */
2004 		consume_skb(newskb);
2005 		newskb = NULL;
2006 	}
2007 
2008 	if (skb_append_pagefrags(skb, page, offset, size)) {
2009 		tail = skb;
2010 		goto alloc_skb;
2011 	}
2012 
2013 	skb->len += size;
2014 	skb->data_len += size;
2015 	skb->truesize += size;
2016 	refcount_add(size, &sk->sk_wmem_alloc);
2017 
2018 	if (newskb) {
2019 		err = unix_scm_to_skb(&scm, skb, false);
2020 		if (err)
2021 			goto err_state_unlock;
2022 		spin_lock(&other->sk_receive_queue.lock);
2023 		__skb_queue_tail(&other->sk_receive_queue, newskb);
2024 		spin_unlock(&other->sk_receive_queue.lock);
2025 	}
2026 
2027 	unix_state_unlock(other);
2028 	mutex_unlock(&unix_sk(other)->iolock);
2029 
2030 	other->sk_data_ready(other);
2031 	scm_destroy(&scm);
2032 	return size;
2033 
2034 err_state_unlock:
2035 	unix_state_unlock(other);
2036 err_unlock:
2037 	mutex_unlock(&unix_sk(other)->iolock);
2038 err:
2039 	kfree_skb(newskb);
2040 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2041 		send_sig(SIGPIPE, current, 0);
2042 	if (!init_scm)
2043 		scm_destroy(&scm);
2044 	return err;
2045 }
2046 
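/* SEQPACKET send reuses the datagram path, but only on a connected
 * socket and with any caller-supplied address ignored.
 */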
2047 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2048 				  size_t len)
2049 {
2050 	int err;
2051 	struct sock *sk = sock->sk;
2052 
2053 	err = sock_error(sk);
2054 	if (err)
2055 		return err;
2056 
2057 	if (sk->sk_state != TCP_ESTABLISHED)
2058 		return -ENOTCONN;
2059 
2060 	if (msg->msg_namelen)
2061 		msg->msg_namelen = 0;
2062 
2063 	return unix_dgram_sendmsg(sock, msg, len);
2064 }
2065 
2066 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2067 				  size_t size, int flags)
2068 {
2069 	struct sock *sk = sock->sk;
2070 
2071 	if (sk->sk_state != TCP_ESTABLISHED)
2072 		return -ENOTCONN;
2073 
2074 	return unix_dgram_recvmsg(sock, msg, size, flags);
2075 }
2076 
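/* Copy the source socket's bound address, if any, into msg_name;
 * for an unbound source, msg_namelen is left untouched.
 */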
2077 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2078 {
2079 	struct unix_sock *u = unix_sk(sk);
2080 
2081 	if (u->addr) {
2082 		msg->msg_namelen = u->addr->len;
2083 		memcpy(msg->msg_name, u->addr->name, u->addr->len);
2084 	}
2085 }
2086 
2087 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2088 			      size_t size, int flags)
2089 {
2090 	struct scm_cookie scm;
2091 	struct sock *sk = sock->sk;
2092 	struct unix_sock *u = unix_sk(sk);
2093 	struct sk_buff *skb, *last;
2094 	long timeo;
2095 	int err;
2096 	int peeked, skip;
2097 
2098 	err = -EOPNOTSUPP;
2099 	if (flags & MSG_OOB)
2100 		goto out;
2101 
2102 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2103 
2104 	do {
2105 		mutex_lock(&u->iolock);
2106 
2107 		skip = sk_peek_offset(sk, flags);
2108 		skb = __skb_try_recv_datagram(sk, flags, NULL, &peeked, &skip,
2109 					      &err, &last);
2110 		if (skb)
2111 			break;
2112 
2113 		mutex_unlock(&u->iolock);
2114 
2115 		if (err != -EAGAIN)
2116 			break;
2117 	} while (timeo &&
2118 		 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
2119 
2120 	if (!skb) { /* implies iolock unlocked */
2121 		unix_state_lock(sk);
2122 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2123 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2124 		    (sk->sk_shutdown & RCV_SHUTDOWN))
2125 			err = 0;
2126 		unix_state_unlock(sk);
2127 		goto out;
2128 	}
2129 
2130 	if (wq_has_sleeper(&u->peer_wait))
2131 		wake_up_interruptible_sync_poll(&u->peer_wait,
2132 						POLLOUT | POLLWRNORM |
2133 						POLLWRBAND);
2134 
2135 	if (msg->msg_name)
2136 		unix_copy_addr(msg, skb->sk);
2137 
2138 	if (size > skb->len - skip)
2139 		size = skb->len - skip;
2140 	else if (size < skb->len - skip)
2141 		msg->msg_flags |= MSG_TRUNC;
2142 
2143 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2144 	if (err)
2145 		goto out_free;
2146 
2147 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2148 		__sock_recv_timestamp(msg, sk, skb);
2149 
2150 	memset(&scm, 0, sizeof(scm));
2151 
2152 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2153 	unix_set_secdata(&scm, skb);
2154 
2155 	if (!(flags & MSG_PEEK)) {
2156 		if (UNIXCB(skb).fp)
2157 			unix_detach_fds(&scm, skb);
2158 
2159 		sk_peek_offset_bwd(sk, skb->len);
2160 	} else {
2161 		/* It is questionable what to do on MSG_PEEK.  We could:
2162 		   - not return fds at all: simple, but loses information;
2163 		   - return fds on peek but not on the subsequent read
2164 		     (the old strategy, apparently wrong);
2165 		   - clone the fds (chosen here as the most universal
2166 		     solution).
2167 
2168 		   POSIX 1003.1g does not actually define this behaviour
2169 		   clearly at all; then again, POSIX 1003.1g leaves a lot
2170 		   of things underspecified!
2171 
2172 		*/
2173 
2174 		sk_peek_offset_fwd(sk, size);
2175 
2176 		if (UNIXCB(skb).fp)
2177 			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2178 	}
2179 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2180 
2181 	scm_recv(sock, msg, &scm, flags);
2182 
2183 out_free:
2184 	skb_free_datagram(sk, skb);
2185 	mutex_unlock(&u->iolock);
2186 out:
2187 	return err;
2188 }
2189 
2190 /*
2191  *	Sleep until more data has arrived, but check for races.
2192  */
2193 static long unix_stream_data_wait(struct sock *sk, long timeo,
2194 				  struct sk_buff *last, unsigned int last_len,
2195 				  bool freezable)
2196 {
2197 	struct sk_buff *tail;
2198 	DEFINE_WAIT(wait);
2199 
2200 	unix_state_lock(sk);
2201 
2202 	for (;;) {
2203 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2204 
2205 		tail = skb_peek_tail(&sk->sk_receive_queue);
2206 		if (tail != last ||
2207 		    (tail && tail->len != last_len) ||
2208 		    sk->sk_err ||
2209 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2210 		    signal_pending(current) ||
2211 		    !timeo)
2212 			break;
2213 
2214 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2215 		unix_state_unlock(sk);
2216 		if (freezable)
2217 			timeo = freezable_schedule_timeout(timeo);
2218 		else
2219 			timeo = schedule_timeout(timeo);
2220 		unix_state_lock(sk);
2221 
2222 		if (sock_flag(sk, SOCK_DEAD))
2223 			break;
2224 
2225 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2226 	}
2227 
2228 	finish_wait(sk_sleep(sk), &wait);
2229 	unix_state_unlock(sk);
2230 	return timeo;
2231 }
2232 
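/* Bytes of a stream skb not yet consumed by earlier reads;
 * UNIXCB(skb).consumed tracks the already-delivered prefix.
 */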
2233 static unsigned int unix_skb_len(const struct sk_buff *skb)
2234 {
2235 	return skb->len - UNIXCB(skb).consumed;
2236 }
2237 
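/* State shared by the stream receive paths: recvmsg() and
 * splice_read() both run unix_stream_read_generic() and differ only
 * in the recv_actor callback that consumes each chunk.
 */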
2238 struct unix_stream_read_state {
2239 	int (*recv_actor)(struct sk_buff *, int, int,
2240 			  struct unix_stream_read_state *);
2241 	struct socket *socket;
2242 	struct msghdr *msg;
2243 	struct pipe_inode_info *pipe;
2244 	size_t size;
2245 	int flags;
2246 	unsigned int splice_flags;
2247 };
2248 
2249 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2250 				    bool freezable)
2251 {
2252 	struct scm_cookie scm;
2253 	struct socket *sock = state->socket;
2254 	struct sock *sk = sock->sk;
2255 	struct unix_sock *u = unix_sk(sk);
2256 	int copied = 0;
2257 	int flags = state->flags;
2258 	int noblock = flags & MSG_DONTWAIT;
2259 	bool check_creds = false;
2260 	int target;
2261 	int err = 0;
2262 	long timeo;
2263 	int skip;
2264 	size_t size = state->size;
2265 	unsigned int last_len;
2266 
2267 	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2268 		err = -EINVAL;
2269 		goto out;
2270 	}
2271 
2272 	if (unlikely(flags & MSG_OOB)) {
2273 		err = -EOPNOTSUPP;
2274 		goto out;
2275 	}
2276 
2277 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2278 	timeo = sock_rcvtimeo(sk, noblock);
2279 
2280 	memset(&scm, 0, sizeof(scm));
2281 
2282 	/* Lock the socket to prevent the queue from being reordered
2283 	 * while we sleep copying data out to the message.
2284 	 */
2285 	mutex_lock(&u->iolock);
2286 
2287 	skip = max(sk_peek_offset(sk, flags), 0);
2288 
2289 	do {
2290 		int chunk;
2291 		bool drop_skb;
2292 		struct sk_buff *skb, *last;
2293 
2294 redo:
2295 		unix_state_lock(sk);
2296 		if (sock_flag(sk, SOCK_DEAD)) {
2297 			err = -ECONNRESET;
2298 			goto unlock;
2299 		}
2300 		last = skb = skb_peek(&sk->sk_receive_queue);
2301 		last_len = last ? last->len : 0;
2302 again:
2303 		if (skb == NULL) {
2304 			if (copied >= target)
2305 				goto unlock;
2306 
2307 			/*
2308 			 *	POSIX 1003.1g mandates this order.
2309 			 */
2310 
2311 			err = sock_error(sk);
2312 			if (err)
2313 				goto unlock;
2314 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2315 				goto unlock;
2316 
2317 			unix_state_unlock(sk);
2318 			if (!timeo) {
2319 				err = -EAGAIN;
2320 				break;
2321 			}
2322 
2323 			mutex_unlock(&u->iolock);
2324 
2325 			timeo = unix_stream_data_wait(sk, timeo, last,
2326 						      last_len, freezable);
2327 
2328 			if (signal_pending(current)) {
2329 				err = sock_intr_errno(timeo);
2330 				scm_destroy(&scm);
2331 				goto out;
2332 			}
2333 
2334 			mutex_lock(&u->iolock);
2335 			goto redo;
2336 unlock:
2337 			unix_state_unlock(sk);
2338 			break;
2339 		}
2340 
2341 		while (skip >= unix_skb_len(skb)) {
2342 			skip -= unix_skb_len(skb);
2343 			last = skb;
2344 			last_len = skb->len;
2345 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2346 			if (!skb)
2347 				goto again;
2348 		}
2349 
2350 		unix_state_unlock(sk);
2351 
2352 		if (check_creds) {
2353 			/* Never glue messages from different writers */
2354 			if (!unix_skb_scm_eq(skb, &scm))
2355 				break;
2356 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2357 			/* Copy credentials */
2358 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2359 			unix_set_secdata(&scm, skb);
2360 			check_creds = true;
2361 		}
2362 
2363 		/* Copy address just once */
2364 		if (state->msg && state->msg->msg_name) {
2365 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2366 					 state->msg->msg_name);
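			/* sunaddr is not used beyond the build-time
			 * size check done by DECLARE_SOCKADDR.
			 */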
2367 			unix_copy_addr(state->msg, skb->sk);
2368 			sunaddr = NULL;
2369 		}
2370 
2371 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2372 		skb_get(skb);
2373 		chunk = state->recv_actor(skb, skip, chunk, state);
2374 		drop_skb = !unix_skb_len(skb);
2375 		/* skb is only safe to use if !drop_skb */
2376 		consume_skb(skb);
2377 		if (chunk < 0) {
2378 			if (copied == 0)
2379 				copied = -EFAULT;
2380 			break;
2381 		}
2382 		copied += chunk;
2383 		size -= chunk;
2384 
2385 		if (drop_skb) {
2386 			/* The skb was touched by a concurrent reader;
2387 			 * we should not expect anything more from it
2388 			 * and must assume it is invalid - we can be
2389 			 * sure it was dropped from the socket queue.
2390 			 *
2391 			 * Report a short read instead.
2392 			 */
2393 			err = 0;
2394 			break;
2395 		}
2396 
2397 		/* Mark read part of skb as used */
2398 		if (!(flags & MSG_PEEK)) {
2399 			UNIXCB(skb).consumed += chunk;
2400 
2401 			sk_peek_offset_bwd(sk, chunk);
2402 
2403 			if (UNIXCB(skb).fp)
2404 				unix_detach_fds(&scm, skb);
2405 
2406 			if (unix_skb_len(skb))
2407 				break;
2408 
2409 			skb_unlink(skb, &sk->sk_receive_queue);
2410 			consume_skb(skb);
2411 
2412 			if (scm.fp)
2413 				break;
2414 		} else {
2415 			/* It is questionable, see note in unix_dgram_recvmsg.
2416 			 */
2417 			if (UNIXCB(skb).fp)
2418 				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2419 
2420 			sk_peek_offset_fwd(sk, chunk);
2421 
2422 			if (UNIXCB(skb).fp)
2423 				break;
2424 
2425 			skip = 0;
2426 			last = skb;
2427 			last_len = skb->len;
2428 			unix_state_lock(sk);
2429 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2430 			if (skb)
2431 				goto again;
2432 			unix_state_unlock(sk);
2433 			break;
2434 		}
2435 	} while (size);
2436 
2437 	mutex_unlock(&u->iolock);
2438 	if (state->msg)
2439 		scm_recv(sock, state->msg, &scm, flags);
2440 	else
2441 		scm_destroy(&scm);
2442 out:
2443 	return copied ? : err;
2444 }
2445 
2446 static int unix_stream_read_actor(struct sk_buff *skb,
2447 				  int skip, int chunk,
2448 				  struct unix_stream_read_state *state)
2449 {
2450 	int ret;
2451 
2452 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2453 				    state->msg, chunk);
2454 	return ret ?: chunk;
2455 }
2456 
2457 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2458 			       size_t size, int flags)
2459 {
2460 	struct unix_stream_read_state state = {
2461 		.recv_actor = unix_stream_read_actor,
2462 		.socket = sock,
2463 		.msg = msg,
2464 		.size = size,
2465 		.flags = flags
2466 	};
2467 
2468 	return unix_stream_read_generic(&state, true);
2469 }
2470 
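/* Splice actor: move the unconsumed bytes of one skb straight into
 * the pipe instead of copying them through a msghdr.
 */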
2471 static int unix_stream_splice_actor(struct sk_buff *skb,
2472 				    int skip, int chunk,
2473 				    struct unix_stream_read_state *state)
2474 {
2475 	return skb_splice_bits(skb, state->socket->sk,
2476 			       UNIXCB(skb).consumed + skip,
2477 			       state->pipe, chunk, state->splice_flags);
2478 }
2479 
2480 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2481 				       struct pipe_inode_info *pipe,
2482 				       size_t size, unsigned int flags)
2483 {
2484 	struct unix_stream_read_state state = {
2485 		.recv_actor = unix_stream_splice_actor,
2486 		.socket = sock,
2487 		.pipe = pipe,
2488 		.size = size,
2489 		.splice_flags = flags,
2490 	};
2491 
2492 	if (unlikely(*ppos))
2493 		return -ESPIPE;
2494 
2495 	if (sock->file->f_flags & O_NONBLOCK ||
2496 	    flags & SPLICE_F_NONBLOCK)
2497 		state.flags = MSG_DONTWAIT;
2498 
2499 	return unix_stream_read_generic(&state, false);
2500 }
2501 
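/* Illustrative userspace view (not part of this file): shutdown(fd,
 * SHUT_WR) on one end of a connected stream pair sets SEND_SHUTDOWN
 * here and RCV_SHUTDOWN on the peer, so a subsequent read() on the
 * peer returns 0 (EOF).
 */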
2502 static int unix_shutdown(struct socket *sock, int mode)
2503 {
2504 	struct sock *sk = sock->sk;
2505 	struct sock *other;
2506 
2507 	if (mode < SHUT_RD || mode > SHUT_RDWR)
2508 		return -EINVAL;
2509 	/* This maps:
2510 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2511 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2512 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2513 	 */
2514 	++mode;
2515 
2516 	unix_state_lock(sk);
2517 	sk->sk_shutdown |= mode;
2518 	other = unix_peer(sk);
2519 	if (other)
2520 		sock_hold(other);
2521 	unix_state_unlock(sk);
2522 	sk->sk_state_change(sk);
2523 
2524 	if (other &&
2525 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2526 
2527 		int peer_mode = 0;
2528 
2529 		if (mode & RCV_SHUTDOWN)
2530 			peer_mode |= SEND_SHUTDOWN;
2531 		if (mode & SEND_SHUTDOWN)
2532 			peer_mode |= RCV_SHUTDOWN;
2533 		unix_state_lock(other);
2534 		other->sk_shutdown |= peer_mode;
2535 		unix_state_unlock(other);
2536 		other->sk_state_change(other);
2537 		if (peer_mode == SHUTDOWN_MASK)
2538 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2539 		else if (peer_mode & RCV_SHUTDOWN)
2540 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2541 	}
2542 	if (other)
2543 		sock_put(other);
2544 
2545 	return 0;
2546 }
2547 
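/* Bytes available for reading (SIOCINQ): for stream and seqpacket
 * sockets, the unread bytes summed over the whole receive queue; for
 * datagram sockets, the size of the next message only.
 */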
2548 long unix_inq_len(struct sock *sk)
2549 {
2550 	struct sk_buff *skb;
2551 	long amount = 0;
2552 
2553 	if (sk->sk_state == TCP_LISTEN)
2554 		return -EINVAL;
2555 
2556 	spin_lock(&sk->sk_receive_queue.lock);
2557 	if (sk->sk_type == SOCK_STREAM ||
2558 	    sk->sk_type == SOCK_SEQPACKET) {
2559 		skb_queue_walk(&sk->sk_receive_queue, skb)
2560 			amount += unix_skb_len(skb);
2561 	} else {
2562 		skb = skb_peek(&sk->sk_receive_queue);
2563 		if (skb)
2564 			amount = skb->len;
2565 	}
2566 	spin_unlock(&sk->sk_receive_queue.lock);
2567 
2568 	return amount;
2569 }
2570 EXPORT_SYMBOL_GPL(unix_inq_len);
2571 
2572 long unix_outq_len(struct sock *sk)
2573 {
2574 	return sk_wmem_alloc_get(sk);
2575 }
2576 EXPORT_SYMBOL_GPL(unix_outq_len);
2577 
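/* SIOCUNIXFILE: return an O_CLOEXEC fd opened O_PATH on the
 * filesystem object a pathname socket is bound to.  Requires
 * CAP_NET_ADMIN in the socket's network namespace.
 */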
2578 static int unix_open_file(struct sock *sk)
2579 {
2580 	struct path path;
2581 	struct file *f;
2582 	int fd;
2583 
2584 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2585 		return -EPERM;
2586 
2587 	unix_state_lock(sk);
2588 	path = unix_sk(sk)->path;
2589 	if (!path.dentry) {
2590 		unix_state_unlock(sk);
2591 		return -ENOENT;
2592 	}
2593 
2594 	path_get(&path);
2595 	unix_state_unlock(sk);
2596 
2597 	fd = get_unused_fd_flags(O_CLOEXEC);
2598 	if (fd < 0)
2599 		goto out;
2600 
2601 	f = dentry_open(&path, O_PATH, current_cred());
2602 	if (IS_ERR(f)) {
2603 		put_unused_fd(fd);
2604 		fd = PTR_ERR(f);
2605 		goto out;
2606 	}
2607 
2608 	fd_install(fd, f);
2609 out:
2610 	path_put(&path);
2611 
2612 	return fd;
2613 }
2614 
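/* Illustrative userspace usage (not part of this file), assuming fd
 * is an AF_UNIX socket:
 *
 *	int queued;
 *
 *	if (ioctl(fd, SIOCINQ, &queued) == 0)
 *		printf("%d bytes queued for reading\n", queued);
 */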
2615 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2616 {
2617 	struct sock *sk = sock->sk;
2618 	long amount = 0;
2619 	int err;
2620 
2621 	switch (cmd) {
2622 	case SIOCOUTQ:
2623 		amount = unix_outq_len(sk);
2624 		err = put_user(amount, (int __user *)arg);
2625 		break;
2626 	case SIOCINQ:
2627 		amount = unix_inq_len(sk);
2628 		if (amount < 0)
2629 			err = amount;
2630 		else
2631 			err = put_user(amount, (int __user *)arg);
2632 		break;
2633 	case SIOCUNIXFILE:
2634 		err = unix_open_file(sk);
2635 		break;
2636 	default:
2637 		err = -ENOIOCTLCMD;
2638 		break;
2639 	}
2640 	return err;
2641 }
2642 
2643 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2644 {
2645 	struct sock *sk = sock->sk;
2646 	unsigned int mask;
2647 
2648 	sock_poll_wait(file, sk_sleep(sk), wait);
2649 	mask = 0;
2650 
2651 	/* exceptional events? */
2652 	if (sk->sk_err)
2653 		mask |= POLLERR;
2654 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2655 		mask |= POLLHUP;
2656 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2657 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2658 
2659 	/* readable? */
2660 	if (!skb_queue_empty(&sk->sk_receive_queue))
2661 		mask |= POLLIN | POLLRDNORM;
2662 
2663 	/* Connection-based need to check for termination and startup */
2664 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2665 	    sk->sk_state == TCP_CLOSE)
2666 		mask |= POLLHUP;
2667 
2668 	/*
2669 	 * We also report the socket as writable when the other side has
2670 	 * shut down the connection; this prevents writers getting stuck.
2671 	 */
2672 	if (unix_writable(sk))
2673 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2674 
2675 	return mask;
2676 }
2677 
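/* Datagram/seqpacket poll differs from the stream variant above in
 * that writability also depends on the peer: a connected sender can
 * be reported unwritable while the peer's receive queue is full.
 */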
2678 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2679 				    poll_table *wait)
2680 {
2681 	struct sock *sk = sock->sk, *other;
2682 	unsigned int mask, writable;
2683 
2684 	sock_poll_wait(file, sk_sleep(sk), wait);
2685 	mask = 0;
2686 
2687 	/* exceptional events? */
2688 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2689 		mask |= POLLERR |
2690 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2691 
2692 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2693 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2694 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2695 		mask |= POLLHUP;
2696 
2697 	/* readable? */
2698 	if (!skb_queue_empty(&sk->sk_receive_queue))
2699 		mask |= POLLIN | POLLRDNORM;
2700 
2701 	/* Connection-based need to check for termination and startup */
2702 	if (sk->sk_type == SOCK_SEQPACKET) {
2703 		if (sk->sk_state == TCP_CLOSE)
2704 			mask |= POLLHUP;
2705 		/* connection hasn't started yet? */
2706 		if (sk->sk_state == TCP_SYN_SENT)
2707 			return mask;
2708 	}
2709 
2710 	/* No write status requested, avoid expensive OUT tests. */
2711 	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2712 		return mask;
2713 
2714 	writable = unix_writable(sk);
2715 	if (writable) {
2716 		unix_state_lock(sk);
2717 
2718 		other = unix_peer(sk);
2719 		if (other && unix_peer(other) != sk &&
2720 		    unix_recvq_full(other) &&
2721 		    unix_dgram_peer_wake_me(sk, other))
2722 			writable = 0;
2723 
2724 		unix_state_unlock(sk);
2725 	}
2726 
2727 	if (writable)
2728 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2729 	else
2730 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2731 
2732 	return mask;
2733 }
2734 
2735 #ifdef CONFIG_PROC_FS
2736 
2737 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2738 
2739 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2740 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2741 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
2742 
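/* The seq_file position encodes a (bucket, offset) pair: the hash
 * bucket index in the high bits and a 1-based position within that
 * bucket in the low BUCKET_SPACE bits.
 */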
2743 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2744 {
2745 	unsigned long offset = get_offset(*pos);
2746 	unsigned long bucket = get_bucket(*pos);
2747 	struct sock *sk;
2748 	unsigned long count = 0;
2749 
2750 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2751 		if (sock_net(sk) != seq_file_net(seq))
2752 			continue;
2753 		if (++count == offset)
2754 			break;
2755 	}
2756 
2757 	return sk;
2758 }
2759 
2760 static struct sock *unix_next_socket(struct seq_file *seq,
2761 				     struct sock *sk,
2762 				     loff_t *pos)
2763 {
2764 	unsigned long bucket;
2765 
2766 	while (sk > (struct sock *)SEQ_START_TOKEN) {
2767 		sk = sk_next(sk);
2768 		if (!sk)
2769 			goto next_bucket;
2770 		if (sock_net(sk) == seq_file_net(seq))
2771 			return sk;
2772 	}
2773 
2774 	do {
2775 		sk = unix_from_bucket(seq, pos);
2776 		if (sk)
2777 			return sk;
2778 
2779 next_bucket:
2780 		bucket = get_bucket(*pos) + 1;
2781 		*pos = set_bucket_offset(bucket, 1);
2782 	} while (bucket < ARRAY_SIZE(unix_socket_table));
2783 
2784 	return NULL;
2785 }
2786 
2787 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2788 	__acquires(unix_table_lock)
2789 {
2790 	spin_lock(&unix_table_lock);
2791 
2792 	if (!*pos)
2793 		return SEQ_START_TOKEN;
2794 
2795 	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2796 		return NULL;
2797 
2798 	return unix_next_socket(seq, NULL, pos);
2799 }
2800 
2801 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2802 {
2803 	++*pos;
2804 	return unix_next_socket(seq, v, pos);
2805 }
2806 
2807 static void unix_seq_stop(struct seq_file *seq, void *v)
2808 	__releases(unix_table_lock)
2809 {
2810 	spin_unlock(&unix_table_lock);
2811 }
2812 
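/* One line of /proc/net/unix per socket: kernel pointer (%pK),
 * refcount, protocol (always 0), flags, type, state and inode,
 * followed by the bound path; abstract names get a leading '@' and
 * embedded NULs are rendered as '@'.
 */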
2813 static int unix_seq_show(struct seq_file *seq, void *v)
2814 {
2816 	if (v == SEQ_START_TOKEN) {
2817 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2818 			 "Inode Path\n");
2819 	} else {
2820 		struct sock *s = v;
2821 		struct unix_sock *u = unix_sk(s);
2822 		unix_state_lock(s);
2823 
2824 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2825 			s,
2826 			refcount_read(&s->sk_refcnt),
2827 			0,
2828 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2829 			s->sk_type,
2830 			s->sk_socket ?
2831 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2832 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2833 			sock_i_ino(s));
2834 
2835 		if (u->addr) {
2836 			int i, len;
2837 			seq_putc(seq, ' ');
2838 
2839 			i = 0;
2840 			len = u->addr->len - sizeof(short);
2841 			if (!UNIX_ABSTRACT(s))
2842 				len--;
2843 			else {
2844 				seq_putc(seq, '@');
2845 				i++;
2846 			}
2847 			for ( ; i < len; i++)
2848 				seq_putc(seq, u->addr->name->sun_path[i] ?:
2849 					 '@');
2850 		}
2851 		unix_state_unlock(s);
2852 		seq_putc(seq, '\n');
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 static const struct seq_operations unix_seq_ops = {
2859 	.start  = unix_seq_start,
2860 	.next   = unix_seq_next,
2861 	.stop   = unix_seq_stop,
2862 	.show   = unix_seq_show,
2863 };
2864 
2865 static int unix_seq_open(struct inode *inode, struct file *file)
2866 {
2867 	return seq_open_net(inode, file, &unix_seq_ops,
2868 			    sizeof(struct seq_net_private));
2869 }
2870 
2871 static const struct file_operations unix_seq_fops = {
2872 	.owner		= THIS_MODULE,
2873 	.open		= unix_seq_open,
2874 	.read		= seq_read,
2875 	.llseek		= seq_lseek,
2876 	.release	= seq_release_net,
2877 };
2878 
2879 #endif
2880 
2881 static const struct net_proto_family unix_family_ops = {
2882 	.family = PF_UNIX,
2883 	.create = unix_create,
2884 	.owner	= THIS_MODULE,
2885 };
2886 
2887 
2888 static int __net_init unix_net_init(struct net *net)
2889 {
2890 	int error = -ENOMEM;
2891 
2892 	net->unx.sysctl_max_dgram_qlen = 10;
2893 	if (unix_sysctl_register(net))
2894 		goto out;
2895 
2896 #ifdef CONFIG_PROC_FS
2897 	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2898 		unix_sysctl_unregister(net);
2899 		goto out;
2900 	}
2901 #endif
2902 	error = 0;
2903 out:
2904 	return error;
2905 }
2906 
2907 static void __net_exit unix_net_exit(struct net *net)
2908 {
2909 	unix_sysctl_unregister(net);
2910 	remove_proc_entry("unix", net->proc_net);
2911 }
2912 
2913 static struct pernet_operations unix_net_ops = {
2914 	.init = unix_net_init,
2915 	.exit = unix_net_exit,
2916 };
2917 
2918 static int __init af_unix_init(void)
2919 {
2920 	int rc = -1;
2921 
2922 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
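	/* unix_skb_parms is kept in skb->cb of every queued skb; make
	 * sure it actually fits.
	 */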
2923 
2924 	rc = proto_register(&unix_proto, 1);
2925 	if (rc != 0) {
2926 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2927 		goto out;
2928 	}
2929 
2930 	sock_register(&unix_family_ops);
2931 	register_pernet_subsys(&unix_net_ops);
2932 out:
2933 	return rc;
2934 }
2935 
2936 static void __exit af_unix_exit(void)
2937 {
2938 	sock_unregister(PF_UNIX);
2939 	proto_unregister(&unix_proto);
2940 	unregister_pernet_subsys(&unix_net_ops);
2941 }
2942 
2943 /* Earlier than device_initcall() so that other drivers invoking
2944  * request_module() don't end up in a loop when modprobe tries to
2945  * use a UNIX socket.  Later than subsys_initcall() because we
2946  * depend on infrastructure initialised there. */
2947 fs_initcall(af_unix_init);
2948 module_exit(af_unix_exit);
2949 
2950 MODULE_LICENSE("GPL");
2951 MODULE_ALIAS_NETPROTO(PF_UNIX);
2952