xref: /openbmc/linux/net/unix/af_unix.c (revision 8dda2eac)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET4:	Implementation of BSD Unix domain sockets.
4  *
5  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *
7  * Fixes:
8  *		Linus Torvalds	:	Assorted bug cures.
9  *		Niibe Yutaka	:	async I/O support.
10  *		Carsten Paeth	:	PF_UNIX check, address fixes.
11  *		Alan Cox	:	Limit size of allocated blocks.
12  *		Alan Cox	:	Fixed the stupid socketpair bug.
13  *		Alan Cox	:	BSD compatibility fine tuning.
14  *		Alan Cox	:	Fixed a bug in connect when interrupted.
15  *		Alan Cox	:	Sorted out a proper draft version of
16  *					file descriptor passing hacked up from
17  *					Mike Shaver's work.
18  *		Marty Leisner	:	Fixes to fd passing
19  *		Nick Nevin	:	recvmsg bugfix.
20  *		Alan Cox	:	Started proper garbage collector
21  *		Heiko Eißfeldt	:	Missing verify_area check
22  *		Alan Cox	:	Started POSIXisms
23  *		Andreas Schwab	:	Replace inode by dentry for proper
24  *					reference counting
25  *		Kirk Petersen	:	Made this a module
26  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
27  *					Lots of bug fixes.
28  *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
29  *					by the above two patches.
30  *	     Andrea Arcangeli	:	If possible we block in connect(2)
31  *					if the max backlog of the listen socket
32  *					has been reached. This won't break
33  *					old apps and it avoids a huge number
34  *					of hashed sockets (for unix_gc()
35  *					performance reasons).
36  *					Security fix that limits the max
37  *					number of socks to 2*max_files and
38  *					the number of skbs queueable in the
39  *					dgram receiver.
40  *		Artur Skawina   :	Hash function optimizations
41  *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
42  *	      Malcolm Beattie   :	Set peercred for socketpair
43  *	     Michal Ostrowski   :       Module initialization cleanup.
44  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
45  *	     				the core infrastructure is doing that
46  *	     				for all net proto families now (2.5.69+)
47  *
48  * Known differences from reference BSD that was tested:
49  *
50  *	[TO FIX]
51  *	ECONNREFUSED is not returned from one end of a connected() socket to the
52  *		other the moment one end closes.
53  *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
54  *		mark and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
55  *	[NOT TO FIX]
56  *	accept() returns a path name even if the connecting socket has closed
57  *		in the meantime (BSD loses the path and gives up).
58  *	accept() returns a 0 length path for an unbound connector. BSD returns 16
59  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
61  *	BSD af_unix apparently has connect forgetting to block properly.
62  *		(need to check this with the POSIX spec in detail)
63  *
64  * Differences from 2.0.0-11-... (ANK)
65  *	Bug fixes and improvements.
66  *		- client shutdown killed server socket.
67  *		- removed all useless cli/sti pairs.
68  *
69  *	Semantic changes/extensions.
70  *		- generic control message passing.
71  *		- SCM_CREDENTIALS control message.
72  *		- "Abstract" (not FS based) socket bindings.
73  *		  Abstract names are sequences of bytes (not zero terminated)
74  *		  starting with a 0 byte, so that this name space does not
75  *		  intersect with BSD names.
76  */
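
/* Illustrative userspace sketch (not kernel code): the abstract
 * namespace described above is selected by a leading 0 byte in
 * sun_path, and abstract names are compared as raw byte strings, so
 * the address length passed to bind(2) must cover only the meaningful
 * bytes:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	// filesystem name: NUL-terminated path, visible to stat(2)
 *	strcpy(a.sun_path, "/tmp/mysock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// abstract name: sun_path[0] == '\0', never touches the FS
 *	memcpy(a.sun_path, "\0mysock", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 7);
 */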
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
90 #include <linux/un.h>
91 #include <linux/fcntl.h>
92 #include <linux/termios.h>
93 #include <linux/sockios.h>
94 #include <linux/net.h>
95 #include <linux/in.h>
96 #include <linux/fs.h>
97 #include <linux/slab.h>
98 #include <linux/uaccess.h>
99 #include <linux/skbuff.h>
100 #include <linux/netdevice.h>
101 #include <net/net_namespace.h>
102 #include <net/sock.h>
103 #include <net/tcp_states.h>
104 #include <net/af_unix.h>
105 #include <linux/proc_fs.h>
106 #include <linux/seq_file.h>
107 #include <net/scm.h>
108 #include <linux/init.h>
109 #include <linux/poll.h>
110 #include <linux/rtnetlink.h>
111 #include <linux/mount.h>
112 #include <net/checksum.h>
113 #include <linux/security.h>
114 #include <linux/freezer.h>
115 #include <linux/file.h>
116 
117 #include "scm.h"
118 
119 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
120 EXPORT_SYMBOL_GPL(unix_socket_table);
121 DEFINE_SPINLOCK(unix_table_lock);
122 EXPORT_SYMBOL_GPL(unix_table_lock);
123 static atomic_long_t unix_nr_socks;
124 
125 
126 static struct hlist_head *unix_sockets_unbound(void *addr)
127 {
128 	unsigned long hash = (unsigned long)addr;
129 
130 	hash ^= hash >> 16;
131 	hash ^= hash >> 8;
132 	hash %= UNIX_HASH_SIZE;
133 	return &unix_socket_table[UNIX_HASH_SIZE + hash];
134 }
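
/* Note on the layout implied above: the first UNIX_HASH_SIZE buckets of
 * unix_socket_table hold bound sockets (abstract names hashed via
 * unix_hash_fold(), filesystem names via the inode number), while the
 * second half holds unbound sockets, hashed by the socket pointer as
 * computed in unix_sockets_unbound().
 */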
135 
136 #define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
137 
138 #ifdef CONFIG_SECURITY_NETWORK
139 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
140 {
141 	UNIXCB(skb).secid = scm->secid;
142 }
143 
144 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
145 {
146 	scm->secid = UNIXCB(skb).secid;
147 }
148 
149 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
150 {
151 	return (scm->secid == UNIXCB(skb).secid);
152 }
153 #else
154 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
155 { }
156 
157 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
158 { }
159 
160 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
161 {
162 	return true;
163 }
164 #endif /* CONFIG_SECURITY_NETWORK */
165 
166 /*
167  *  SMP locking strategy:
168  *    the hash table is protected by the spinlock unix_table_lock;
169  *    each socket's state is protected by a separate spin lock.
170  */
171 
172 static inline unsigned int unix_hash_fold(__wsum n)
173 {
174 	unsigned int hash = (__force unsigned int)csum_fold(n);
175 
176 	hash ^= hash>>8;
177 	return hash&(UNIX_HASH_SIZE-1);
178 }
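
/* Worked example (assuming UNIX_HASH_SIZE is 256, its value in this
 * tree): csum_fold() yields a 16-bit value, say 0xabcd; folding in the
 * high byte gives 0xabcd ^ 0x00ab = 0xab66, and the final mask with
 * (UNIX_HASH_SIZE - 1) leaves the bucket index 0x66.
 */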
179 
180 #define unix_peer(sk) (unix_sk(sk)->peer)
181 
182 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
183 {
184 	return unix_peer(osk) == sk;
185 }
186 
187 static inline int unix_may_send(struct sock *sk, struct sock *osk)
188 {
189 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
190 }
191 
192 static inline int unix_recvq_full(const struct sock *sk)
193 {
194 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
195 }
196 
197 static inline int unix_recvq_full_lockless(const struct sock *sk)
198 {
199 	return skb_queue_len_lockless(&sk->sk_receive_queue) >
200 		READ_ONCE(sk->sk_max_ack_backlog);
201 }
202 
203 struct sock *unix_peer_get(struct sock *s)
204 {
205 	struct sock *peer;
206 
207 	unix_state_lock(s);
208 	peer = unix_peer(s);
209 	if (peer)
210 		sock_hold(peer);
211 	unix_state_unlock(s);
212 	return peer;
213 }
214 EXPORT_SYMBOL_GPL(unix_peer_get);
215 
216 static inline void unix_release_addr(struct unix_address *addr)
217 {
218 	if (refcount_dec_and_test(&addr->refcnt))
219 		kfree(addr);
220 }
221 
222 /*
223  *	Check unix socket name:
224  *		- it should not be zero length.
225  *	        - if it starts with a non-zero byte, it should be NUL terminated (FS object)
226  *		- if it starts with a zero byte, it is an abstract name.
227  */
228 
229 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
230 {
231 	*hashp = 0;
232 
233 	if (len <= sizeof(short) || len > sizeof(*sunaddr))
234 		return -EINVAL;
235 	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
236 		return -EINVAL;
237 	if (sunaddr->sun_path[0]) {
238 		/*
239 		 * This may look like an off by one error but it is a bit more
240 		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
241 		 * sun_path[108] doesn't exist as such.  However, in kernel space
242 		 * we are guaranteed that it is a valid memory location in our
243 		 * kernel address buffer.
244 		 */
245 		((char *)sunaddr)[len] = 0;
246 		len = strlen(sunaddr->sun_path)+1+sizeof(short);
247 		return len;
248 	}
249 
250 	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
251 	return len;
252 }
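
/* Example of the contract above: binding "/tmp/mysock" (11 bytes) with
 * any sufficiently large len returns 11 + 1 + sizeof(short) = 14, i.e.
 * the family field plus the NUL-terminated path; for an abstract name
 * the caller-supplied len is returned unchanged and *hashp is set from
 * a checksum of the whole address.
 */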
253 
254 static void __unix_remove_socket(struct sock *sk)
255 {
256 	sk_del_node_init(sk);
257 }
258 
259 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
260 {
261 	WARN_ON(!sk_unhashed(sk));
262 	sk_add_node(sk, list);
263 }
264 
265 static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
266 			    unsigned hash)
267 {
268 	__unix_remove_socket(sk);
269 	smp_store_release(&unix_sk(sk)->addr, addr);
270 	__unix_insert_socket(&unix_socket_table[hash], sk);
271 }
272 
273 static inline void unix_remove_socket(struct sock *sk)
274 {
275 	spin_lock(&unix_table_lock);
276 	__unix_remove_socket(sk);
277 	spin_unlock(&unix_table_lock);
278 }
279 
280 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
281 {
282 	spin_lock(&unix_table_lock);
283 	__unix_insert_socket(list, sk);
284 	spin_unlock(&unix_table_lock);
285 }
286 
287 static struct sock *__unix_find_socket_byname(struct net *net,
288 					      struct sockaddr_un *sunname,
289 					      int len, unsigned int hash)
290 {
291 	struct sock *s;
292 
293 	sk_for_each(s, &unix_socket_table[hash]) {
294 		struct unix_sock *u = unix_sk(s);
295 
296 		if (!net_eq(sock_net(s), net))
297 			continue;
298 
299 		if (u->addr->len == len &&
300 		    !memcmp(u->addr->name, sunname, len))
301 			return s;
302 	}
303 	return NULL;
304 }
305 
306 static inline struct sock *unix_find_socket_byname(struct net *net,
307 						   struct sockaddr_un *sunname,
308 						   int len, unsigned int hash)
309 {
310 	struct sock *s;
311 
312 	spin_lock(&unix_table_lock);
313 	s = __unix_find_socket_byname(net, sunname, len, hash);
314 	if (s)
315 		sock_hold(s);
316 	spin_unlock(&unix_table_lock);
317 	return s;
318 }
319 
320 static struct sock *unix_find_socket_byinode(struct inode *i)
321 {
322 	struct sock *s;
323 
324 	spin_lock(&unix_table_lock);
325 	sk_for_each(s,
326 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
327 		struct dentry *dentry = unix_sk(s)->path.dentry;
328 
329 		if (dentry && d_backing_inode(dentry) == i) {
330 			sock_hold(s);
331 			goto found;
332 		}
333 	}
334 	s = NULL;
335 found:
336 	spin_unlock(&unix_table_lock);
337 	return s;
338 }
339 
340 /* Support code for asymmetrically connected dgram sockets
341  *
342  * If a datagram socket is connected to a socket that is not itself
343  * connected to the first socket (e.g., /dev/log), clients may only
344  * enqueue more messages if the present receive queue of the server
345  * socket is not "too large". This means there is a second
346  * writeability condition that poll and sendmsg need to test. The
347  * dgram recv code does a wake up on the peer_wait wait queue of a
348  * socket upon reception of a datagram; this needs to be propagated
349  * to sleeping would-be writers, since these might not have sent
350  * anything so far. It can't be done via poll_wait, because the
351  * lifetime of the server socket might be less than that of its
352  * clients if these break their association with it or the server
353  * socket is closed while clients are still connected to it; there
354  * is no way to inform "a polling implementation" to let go of a wait queue.
355  *
356  * In order to propagate a wake up, a wait_queue_entry_t of the
357  * client socket is enqueued on the peer_wait queue of the server
358  * socket, whose wake function does a wake_up on the ordinary client
359  * socket wait queue. This connection is established whenever a
360  * write (or poll for write) hits the flow control condition, and
361  * broken when the association to the server socket is dissolved or
362  * after a wake up was relayed.
363  */
364 
365 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
366 				      void *key)
367 {
368 	struct unix_sock *u;
369 	wait_queue_head_t *u_sleep;
370 
371 	u = container_of(q, struct unix_sock, peer_wake);
372 
373 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
374 			    q);
375 	u->peer_wake.private = NULL;
376 
377 	/* relaying can only happen while the wq still exists */
378 	u_sleep = sk_sleep(&u->sk);
379 	if (u_sleep)
380 		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
381 
382 	return 0;
383 }
384 
385 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
386 {
387 	struct unix_sock *u, *u_other;
388 	int rc;
389 
390 	u = unix_sk(sk);
391 	u_other = unix_sk(other);
392 	rc = 0;
393 	spin_lock(&u_other->peer_wait.lock);
394 
395 	if (!u->peer_wake.private) {
396 		u->peer_wake.private = other;
397 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
398 
399 		rc = 1;
400 	}
401 
402 	spin_unlock(&u_other->peer_wait.lock);
403 	return rc;
404 }
405 
406 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
407 					    struct sock *other)
408 {
409 	struct unix_sock *u, *u_other;
410 
411 	u = unix_sk(sk);
412 	u_other = unix_sk(other);
413 	spin_lock(&u_other->peer_wait.lock);
414 
415 	if (u->peer_wake.private == other) {
416 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
417 		u->peer_wake.private = NULL;
418 	}
419 
420 	spin_unlock(&u_other->peer_wait.lock);
421 }
422 
423 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
424 						   struct sock *other)
425 {
426 	unix_dgram_peer_wake_disconnect(sk, other);
427 	wake_up_interruptible_poll(sk_sleep(sk),
428 				   EPOLLOUT |
429 				   EPOLLWRNORM |
430 				   EPOLLWRBAND);
431 }
432 
433 /* preconditions:
434  *	- unix_peer(sk) == other
435  *	- association is stable
436  */
437 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
438 {
439 	int connected;
440 
441 	connected = unix_dgram_peer_wake_connect(sk, other);
442 
443 	/* If other is SOCK_DEAD, we want to make sure we signal
444 	 * POLLOUT, such that a subsequent write() can get a
445 	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
446 	 * to other and it's full, we will hang waiting for POLLOUT.
447 	 */
448 	if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
449 		return 1;
450 
451 	if (connected)
452 		unix_dgram_peer_wake_disconnect(sk, other);
453 
454 	return 0;
455 }
456 
457 static int unix_writable(const struct sock *sk)
458 {
459 	return sk->sk_state != TCP_LISTEN &&
460 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
461 }
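
/* I.e. the socket stays writable while at most a quarter of sk_sndbuf
 * is committed to not-yet-consumed skbs: with a 64 KiB send buffer,
 * writers block once sk_wmem_alloc exceeds 16 KiB.
 */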
462 
463 static void unix_write_space(struct sock *sk)
464 {
465 	struct socket_wq *wq;
466 
467 	rcu_read_lock();
468 	if (unix_writable(sk)) {
469 		wq = rcu_dereference(sk->sk_wq);
470 		if (skwq_has_sleeper(wq))
471 			wake_up_interruptible_sync_poll(&wq->wait,
472 				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
473 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
474 	}
475 	rcu_read_unlock();
476 }
477 
478 /* When a dgram socket disconnects (or changes its peer), we clear its receive
479  * queue of packets that arrived from the previous peer. First, this allows
480  * flow control based only on wmem_alloc; second, a sk connected to a peer
481  * may receive messages only from that peer. */
482 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
483 {
484 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
485 		skb_queue_purge(&sk->sk_receive_queue);
486 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
487 
488 		/* If one link of a bidirectional dgram pipe is disconnected,
489 		 * we signal an error. Messages are lost. Do not do this
490 		 * when the peer was not connected to us.
491 		 */
492 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
493 			other->sk_err = ECONNRESET;
494 			sk_error_report(other);
495 		}
496 	}
497 }
498 
499 static void unix_sock_destructor(struct sock *sk)
500 {
501 	struct unix_sock *u = unix_sk(sk);
502 
503 	skb_queue_purge(&sk->sk_receive_queue);
504 
505 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
506 	WARN_ON(!sk_unhashed(sk));
507 	WARN_ON(sk->sk_socket);
508 	if (!sock_flag(sk, SOCK_DEAD)) {
509 		pr_info("Attempt to release alive unix socket: %p\n", sk);
510 		return;
511 	}
512 
513 	if (u->addr)
514 		unix_release_addr(u->addr);
515 
516 	atomic_long_dec(&unix_nr_socks);
517 	local_bh_disable();
518 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
519 	local_bh_enable();
520 #ifdef UNIX_REFCNT_DEBUG
521 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
522 		atomic_long_read(&unix_nr_socks));
523 #endif
524 }
525 
526 static void unix_release_sock(struct sock *sk, int embrion)
527 {
528 	struct unix_sock *u = unix_sk(sk);
529 	struct path path;
530 	struct sock *skpair;
531 	struct sk_buff *skb;
532 	int state;
533 
534 	unix_remove_socket(sk);
535 
536 	/* Clear state */
537 	unix_state_lock(sk);
538 	sock_orphan(sk);
539 	sk->sk_shutdown = SHUTDOWN_MASK;
540 	path	     = u->path;
541 	u->path.dentry = NULL;
542 	u->path.mnt = NULL;
543 	state = sk->sk_state;
544 	sk->sk_state = TCP_CLOSE;
545 
546 	skpair = unix_peer(sk);
547 	unix_peer(sk) = NULL;
548 
549 	unix_state_unlock(sk);
550 
551 	wake_up_interruptible_all(&u->peer_wait);
552 
553 	if (skpair != NULL) {
554 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
555 			unix_state_lock(skpair);
556 			/* No more writes */
557 			skpair->sk_shutdown = SHUTDOWN_MASK;
558 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
559 				skpair->sk_err = ECONNRESET;
560 			unix_state_unlock(skpair);
561 			skpair->sk_state_change(skpair);
562 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
563 		}
564 
565 		unix_dgram_peer_wake_disconnect(sk, skpair);
566 		sock_put(skpair); /* It may now die */
567 	}
568 
569 	/* Try to flush out this socket. Throw out buffers at least */
570 
571 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
572 		if (state == TCP_LISTEN)
573 			unix_release_sock(skb->sk, 1);
574 		/* passed fds are erased in the kfree_skb hook	      */
575 		UNIXCB(skb).consumed = skb->len;
576 		kfree_skb(skb);
577 	}
578 
579 	if (path.dentry)
580 		path_put(&path);
581 
582 	sock_put(sk);
583 
584 	/* ---- Socket is dead now and most probably destroyed ---- */
585 
586 	/*
587 	 * Fixme: BSD difference: In BSD all sockets connected to us get
588 	 *	  ECONNRESET and we die on the spot. In Linux we behave
589 	 *	  like files and pipes do and wait for the last
590 	 *	  dereference.
591 	 *
592 	 * Can't we simply set sock->err?
593 	 *
594 	 *	  What is the above comment talking about? --ANK(980817)
595 	 */
596 
597 	if (unix_tot_inflight)
598 		unix_gc();		/* Garbage collect fds */
599 }
600 
601 static void init_peercred(struct sock *sk)
602 {
603 	put_pid(sk->sk_peer_pid);
604 	if (sk->sk_peer_cred)
605 		put_cred(sk->sk_peer_cred);
606 	sk->sk_peer_pid  = get_pid(task_tgid(current));
607 	sk->sk_peer_cred = get_current_cred();
608 }
609 
610 static void copy_peercred(struct sock *sk, struct sock *peersk)
611 {
612 	put_pid(sk->sk_peer_pid);
613 	if (sk->sk_peer_cred)
614 		put_cred(sk->sk_peer_cred);
615 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
616 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
617 }
618 
619 static int unix_listen(struct socket *sock, int backlog)
620 {
621 	int err;
622 	struct sock *sk = sock->sk;
623 	struct unix_sock *u = unix_sk(sk);
624 
625 	err = -EOPNOTSUPP;
626 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
627 		goto out;	/* Only stream/seqpacket sockets accept */
628 	err = -EINVAL;
629 	if (!u->addr)
630 		goto out;	/* No listens on an unbound socket */
631 	unix_state_lock(sk);
632 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
633 		goto out_unlock;
634 	if (backlog > sk->sk_max_ack_backlog)
635 		wake_up_interruptible_all(&u->peer_wait);
636 	sk->sk_max_ack_backlog	= backlog;
637 	sk->sk_state		= TCP_LISTEN;
638 	/* set credentials so connect can copy them */
639 	init_peercred(sk);
640 	err = 0;
641 
642 out_unlock:
643 	unix_state_unlock(sk);
644 out:
645 	return err;
646 }
647 
648 static int unix_release(struct socket *);
649 static int unix_bind(struct socket *, struct sockaddr *, int);
650 static int unix_stream_connect(struct socket *, struct sockaddr *,
651 			       int addr_len, int flags);
652 static int unix_socketpair(struct socket *, struct socket *);
653 static int unix_accept(struct socket *, struct socket *, int, bool);
654 static int unix_getname(struct socket *, struct sockaddr *, int);
655 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
656 static __poll_t unix_dgram_poll(struct file *, struct socket *,
657 				    poll_table *);
658 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
659 #ifdef CONFIG_COMPAT
660 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
661 #endif
662 static int unix_shutdown(struct socket *, int);
663 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
664 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
665 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
666 				    size_t size, int flags);
667 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
668 				       struct pipe_inode_info *, size_t size,
669 				       unsigned int flags);
670 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
671 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
672 static int unix_dgram_connect(struct socket *, struct sockaddr *,
673 			      int, int);
674 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
675 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
676 				  int);
677 
678 static int unix_set_peek_off(struct sock *sk, int val)
679 {
680 	struct unix_sock *u = unix_sk(sk);
681 
682 	if (mutex_lock_interruptible(&u->iolock))
683 		return -EINTR;
684 
685 	sk->sk_peek_off = val;
686 	mutex_unlock(&u->iolock);
687 
688 	return 0;
689 }
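
/* Userspace view (illustrative): after
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *
 * each recv(fd, buf, n, MSG_PEEK) starts at the current peek offset
 * and advances it by the number of bytes peeked, instead of always
 * re-reading from the head of the queue.
 */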
690 
691 #ifdef CONFIG_PROC_FS
692 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
693 {
694 	struct sock *sk = sock->sk;
695 	struct unix_sock *u;
696 
697 	if (sk) {
698 		u = unix_sk(sock->sk);
699 		seq_printf(m, "scm_fds: %u\n",
700 			   atomic_read(&u->scm_stat.nr_fds));
701 	}
702 }
703 #else
704 #define unix_show_fdinfo NULL
705 #endif
706 
707 static const struct proto_ops unix_stream_ops = {
708 	.family =	PF_UNIX,
709 	.owner =	THIS_MODULE,
710 	.release =	unix_release,
711 	.bind =		unix_bind,
712 	.connect =	unix_stream_connect,
713 	.socketpair =	unix_socketpair,
714 	.accept =	unix_accept,
715 	.getname =	unix_getname,
716 	.poll =		unix_poll,
717 	.ioctl =	unix_ioctl,
718 #ifdef CONFIG_COMPAT
719 	.compat_ioctl =	unix_compat_ioctl,
720 #endif
721 	.listen =	unix_listen,
722 	.shutdown =	unix_shutdown,
723 	.sendmsg =	unix_stream_sendmsg,
724 	.recvmsg =	unix_stream_recvmsg,
725 	.mmap =		sock_no_mmap,
726 	.sendpage =	unix_stream_sendpage,
727 	.splice_read =	unix_stream_splice_read,
728 	.set_peek_off =	unix_set_peek_off,
729 	.show_fdinfo =	unix_show_fdinfo,
730 };
731 
732 static const struct proto_ops unix_dgram_ops = {
733 	.family =	PF_UNIX,
734 	.owner =	THIS_MODULE,
735 	.release =	unix_release,
736 	.bind =		unix_bind,
737 	.connect =	unix_dgram_connect,
738 	.socketpair =	unix_socketpair,
739 	.accept =	sock_no_accept,
740 	.getname =	unix_getname,
741 	.poll =		unix_dgram_poll,
742 	.ioctl =	unix_ioctl,
743 #ifdef CONFIG_COMPAT
744 	.compat_ioctl =	unix_compat_ioctl,
745 #endif
746 	.listen =	sock_no_listen,
747 	.shutdown =	unix_shutdown,
748 	.sendmsg =	unix_dgram_sendmsg,
749 	.recvmsg =	unix_dgram_recvmsg,
750 	.mmap =		sock_no_mmap,
751 	.sendpage =	sock_no_sendpage,
752 	.set_peek_off =	unix_set_peek_off,
753 	.show_fdinfo =	unix_show_fdinfo,
754 };
755 
756 static const struct proto_ops unix_seqpacket_ops = {
757 	.family =	PF_UNIX,
758 	.owner =	THIS_MODULE,
759 	.release =	unix_release,
760 	.bind =		unix_bind,
761 	.connect =	unix_stream_connect,
762 	.socketpair =	unix_socketpair,
763 	.accept =	unix_accept,
764 	.getname =	unix_getname,
765 	.poll =		unix_dgram_poll,
766 	.ioctl =	unix_ioctl,
767 #ifdef CONFIG_COMPAT
768 	.compat_ioctl =	unix_compat_ioctl,
769 #endif
770 	.listen =	unix_listen,
771 	.shutdown =	unix_shutdown,
772 	.sendmsg =	unix_seqpacket_sendmsg,
773 	.recvmsg =	unix_seqpacket_recvmsg,
774 	.mmap =		sock_no_mmap,
775 	.sendpage =	sock_no_sendpage,
776 	.set_peek_off =	unix_set_peek_off,
777 	.show_fdinfo =	unix_show_fdinfo,
778 };
779 
780 static struct proto unix_proto = {
781 	.name			= "UNIX",
782 	.owner			= THIS_MODULE,
783 	.obj_size		= sizeof(struct unix_sock),
784 };
785 
786 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
787 {
788 	struct sock *sk = NULL;
789 	struct unix_sock *u;
790 
791 	atomic_long_inc(&unix_nr_socks);
792 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
793 		goto out;
794 
795 	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
796 	if (!sk)
797 		goto out;
798 
799 	sock_init_data(sock, sk);
800 
801 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
802 	sk->sk_write_space	= unix_write_space;
803 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
804 	sk->sk_destruct		= unix_sock_destructor;
805 	u	  = unix_sk(sk);
806 	u->path.dentry = NULL;
807 	u->path.mnt = NULL;
808 	spin_lock_init(&u->lock);
809 	atomic_long_set(&u->inflight, 0);
810 	INIT_LIST_HEAD(&u->link);
811 	mutex_init(&u->iolock); /* single task reading lock */
812 	mutex_init(&u->bindlock); /* single task binding lock */
813 	init_waitqueue_head(&u->peer_wait);
814 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
815 	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
816 	unix_insert_socket(unix_sockets_unbound(sk), sk);
817 out:
818 	if (sk == NULL)
819 		atomic_long_dec(&unix_nr_socks);
820 	else {
821 		local_bh_disable();
822 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
823 		local_bh_enable();
824 	}
825 	return sk;
826 }
827 
828 static int unix_create(struct net *net, struct socket *sock, int protocol,
829 		       int kern)
830 {
831 	if (protocol && protocol != PF_UNIX)
832 		return -EPROTONOSUPPORT;
833 
834 	sock->state = SS_UNCONNECTED;
835 
836 	switch (sock->type) {
837 	case SOCK_STREAM:
838 		sock->ops = &unix_stream_ops;
839 		break;
840 		/*
841 		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
842 		 *	nothing uses it.
843 		 */
844 	case SOCK_RAW:
845 		sock->type = SOCK_DGRAM;
846 		fallthrough;
847 	case SOCK_DGRAM:
848 		sock->ops = &unix_dgram_ops;
849 		break;
850 	case SOCK_SEQPACKET:
851 		sock->ops = &unix_seqpacket_ops;
852 		break;
853 	default:
854 		return -ESOCKTNOSUPPORT;
855 	}
856 
857 	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
858 }
859 
860 static int unix_release(struct socket *sock)
861 {
862 	struct sock *sk = sock->sk;
863 
864 	if (!sk)
865 		return 0;
866 
867 	unix_release_sock(sk, 0);
868 	sock->sk = NULL;
869 
870 	return 0;
871 }
872 
873 static int unix_autobind(struct socket *sock)
874 {
875 	struct sock *sk = sock->sk;
876 	struct net *net = sock_net(sk);
877 	struct unix_sock *u = unix_sk(sk);
878 	static u32 ordernum = 1;
879 	struct unix_address *addr;
880 	int err;
881 	unsigned int retries = 0;
882 
883 	err = mutex_lock_interruptible(&u->bindlock);
884 	if (err)
885 		return err;
886 
887 	if (u->addr)
888 		goto out;
889 
890 	err = -ENOMEM;
891 	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
892 	if (!addr)
893 		goto out;
894 
895 	addr->name->sun_family = AF_UNIX;
896 	refcount_set(&addr->refcnt, 1);
897 
898 retry:
899 	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
900 	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
901 	addr->hash ^= sk->sk_type;
902 
903 	spin_lock(&unix_table_lock);
904 	ordernum = (ordernum+1)&0xFFFFF;
905 
906 	if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) {
907 		spin_unlock(&unix_table_lock);
908 		/*
909 		 * __unix_find_socket_byname() may take a long time if many names
910 		 * are already in use.
911 		 */
912 		cond_resched();
913 		/* Give up if all names seem to be in use. */
914 		if (retries++ == 0xFFFFF) {
915 			err = -ENOSPC;
916 			kfree(addr);
917 			goto out;
918 		}
919 		goto retry;
920 	}
921 
922 	__unix_set_addr(sk, addr, addr->hash);
923 	spin_unlock(&unix_table_lock);
924 	err = 0;
925 
926 out:	mutex_unlock(&u->bindlock);
927 	return err;
928 }
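
/* The resulting autobound name is abstract: a 0 byte followed by five
 * hex digits ("%05x" above), so addr->len is 5 + 1 + sizeof(short) = 8;
 * tools such as ss(8) render such names with a leading '@'.
 */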
929 
930 static struct sock *unix_find_other(struct net *net,
931 				    struct sockaddr_un *sunname, int len,
932 				    int type, unsigned int hash, int *error)
933 {
934 	struct sock *u;
935 	struct path path;
936 	int err = 0;
937 
938 	if (sunname->sun_path[0]) {
939 		struct inode *inode;
940 		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
941 		if (err)
942 			goto fail;
943 		inode = d_backing_inode(path.dentry);
944 		err = path_permission(&path, MAY_WRITE);
945 		if (err)
946 			goto put_fail;
947 
948 		err = -ECONNREFUSED;
949 		if (!S_ISSOCK(inode->i_mode))
950 			goto put_fail;
951 		u = unix_find_socket_byinode(inode);
952 		if (!u)
953 			goto put_fail;
954 
955 		if (u->sk_type == type)
956 			touch_atime(&path);
957 
958 		path_put(&path);
959 
960 		err = -EPROTOTYPE;
961 		if (u->sk_type != type) {
962 			sock_put(u);
963 			goto fail;
964 		}
965 	} else {
966 		err = -ECONNREFUSED;
967 		u = unix_find_socket_byname(net, sunname, len, type ^ hash);
968 		if (u) {
969 			struct dentry *dentry;
970 			dentry = unix_sk(u)->path.dentry;
971 			if (dentry)
972 				touch_atime(&unix_sk(u)->path);
973 		} else
974 			goto fail;
975 	}
976 	return u;
977 
978 put_fail:
979 	path_put(&path);
980 fail:
981 	*error = err;
982 	return NULL;
983 }
984 
985 static int unix_bind_bsd(struct sock *sk, struct unix_address *addr)
986 {
987 	struct unix_sock *u = unix_sk(sk);
988 	umode_t mode = S_IFSOCK |
989 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
990 	struct user_namespace *ns; // barf...
991 	struct path parent;
992 	struct dentry *dentry;
993 	unsigned int hash;
994 	int err;
995 
996 	/*
997 	 * Get the parent directory, calculate the hash for the last
998 	 * component.
999 	 */
1000 	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1001 	if (IS_ERR(dentry))
1002 		return PTR_ERR(dentry);
1003 	ns = mnt_user_ns(parent.mnt);
1004 
1005 	/*
1006 	 * All right, let's create it.
1007 	 */
1008 	err = security_path_mknod(&parent, dentry, mode, 0);
1009 	if (!err)
1010 		err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
1011 	if (err)
1012 		goto out;
1013 	err = mutex_lock_interruptible(&u->bindlock);
1014 	if (err)
1015 		goto out_unlink;
1016 	if (u->addr)
1017 		goto out_unlock;
1018 
1019 	addr->hash = UNIX_HASH_SIZE;
1020 	hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1021 	spin_lock(&unix_table_lock);
1022 	u->path.mnt = mntget(parent.mnt);
1023 	u->path.dentry = dget(dentry);
1024 	__unix_set_addr(sk, addr, hash);
1025 	spin_unlock(&unix_table_lock);
1026 	mutex_unlock(&u->bindlock);
1027 	done_path_create(&parent, dentry);
1028 	return 0;
1029 
1030 out_unlock:
1031 	mutex_unlock(&u->bindlock);
1032 	err = -EINVAL;
1033 out_unlink:
1034 	/* failed after successful mknod?  unlink what we'd created... */
1035 	vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
1036 out:
1037 	done_path_create(&parent, dentry);
1038 	return err;
1039 }
1040 
1041 static int unix_bind_abstract(struct sock *sk, struct unix_address *addr)
1042 {
1043 	struct unix_sock *u = unix_sk(sk);
1044 	int err;
1045 
1046 	err = mutex_lock_interruptible(&u->bindlock);
1047 	if (err)
1048 		return err;
1049 
1050 	if (u->addr) {
1051 		mutex_unlock(&u->bindlock);
1052 		return -EINVAL;
1053 	}
1054 
1055 	spin_lock(&unix_table_lock);
1056 	if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
1057 				      addr->hash)) {
1058 		spin_unlock(&unix_table_lock);
1059 		mutex_unlock(&u->bindlock);
1060 		return -EADDRINUSE;
1061 	}
1062 	__unix_set_addr(sk, addr, addr->hash);
1063 	spin_unlock(&unix_table_lock);
1064 	mutex_unlock(&u->bindlock);
1065 	return 0;
1066 }
1067 
1068 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1069 {
1070 	struct sock *sk = sock->sk;
1071 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1072 	char *sun_path = sunaddr->sun_path;
1073 	int err;
1074 	unsigned int hash;
1075 	struct unix_address *addr;
1076 
1077 	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1078 	    sunaddr->sun_family != AF_UNIX)
1079 		return -EINVAL;
1080 
1081 	if (addr_len == sizeof(short))
1082 		return unix_autobind(sock);
1083 
1084 	err = unix_mkname(sunaddr, addr_len, &hash);
1085 	if (err < 0)
1086 		return err;
1087 	addr_len = err;
1088 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1089 	if (!addr)
1090 		return -ENOMEM;
1091 
1092 	memcpy(addr->name, sunaddr, addr_len);
1093 	addr->len = addr_len;
1094 	addr->hash = hash ^ sk->sk_type;
1095 	refcount_set(&addr->refcnt, 1);
1096 
1097 	if (sun_path[0])
1098 		err = unix_bind_bsd(sk, addr);
1099 	else
1100 		err = unix_bind_abstract(sk, addr);
1101 	if (err)
1102 		unix_release_addr(addr);
1103 	return err == -EEXIST ? -EADDRINUSE : err;
1104 }
1105 
1106 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1107 {
1108 	if (unlikely(sk1 == sk2) || !sk2) {
1109 		unix_state_lock(sk1);
1110 		return;
1111 	}
1112 	if (sk1 < sk2) {
1113 		unix_state_lock(sk1);
1114 		unix_state_lock_nested(sk2);
1115 	} else {
1116 		unix_state_lock(sk2);
1117 		unix_state_lock_nested(sk1);
1118 	}
1119 }
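
/* Taking the two state locks in ascending address order makes this
 * deadlock-free: tasks concurrently connecting A->B and B->A both
 * lock min(A, B) first, so neither can hold one lock while waiting
 * for the other (the classic ABBA scenario).
 */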
1120 
1121 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1122 {
1123 	if (unlikely(sk1 == sk2) || !sk2) {
1124 		unix_state_unlock(sk1);
1125 		return;
1126 	}
1127 	unix_state_unlock(sk1);
1128 	unix_state_unlock(sk2);
1129 }
1130 
1131 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1132 			      int alen, int flags)
1133 {
1134 	struct sock *sk = sock->sk;
1135 	struct net *net = sock_net(sk);
1136 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1137 	struct sock *other;
1138 	unsigned int hash;
1139 	int err;
1140 
1141 	err = -EINVAL;
1142 	if (alen < offsetofend(struct sockaddr, sa_family))
1143 		goto out;
1144 
1145 	if (addr->sa_family != AF_UNSPEC) {
1146 		err = unix_mkname(sunaddr, alen, &hash);
1147 		if (err < 0)
1148 			goto out;
1149 		alen = err;
1150 
1151 		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1152 		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1153 			goto out;
1154 
1155 restart:
1156 		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1157 		if (!other)
1158 			goto out;
1159 
1160 		unix_state_double_lock(sk, other);
1161 
1162 		/* Apparently VFS overslept socket death. Retry. */
1163 		if (sock_flag(other, SOCK_DEAD)) {
1164 			unix_state_double_unlock(sk, other);
1165 			sock_put(other);
1166 			goto restart;
1167 		}
1168 
1169 		err = -EPERM;
1170 		if (!unix_may_send(sk, other))
1171 			goto out_unlock;
1172 
1173 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1174 		if (err)
1175 			goto out_unlock;
1176 
1177 	} else {
1178 		/*
1179 		 *	1003.1g breaking connected state with AF_UNSPEC
1180 		 */
1181 		other = NULL;
1182 		unix_state_double_lock(sk, other);
1183 	}
1184 
1185 	/*
1186 	 * If it was connected, reconnect.
1187 	 */
1188 	if (unix_peer(sk)) {
1189 		struct sock *old_peer = unix_peer(sk);
1190 		unix_peer(sk) = other;
1191 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1192 
1193 		unix_state_double_unlock(sk, other);
1194 
1195 		if (other != old_peer)
1196 			unix_dgram_disconnected(sk, old_peer);
1197 		sock_put(old_peer);
1198 	} else {
1199 		unix_peer(sk) = other;
1200 		unix_state_double_unlock(sk, other);
1201 	}
1202 	return 0;
1203 
1204 out_unlock:
1205 	unix_state_double_unlock(sk, other);
1206 	sock_put(other);
1207 out:
1208 	return err;
1209 }
1210 
1211 static long unix_wait_for_peer(struct sock *other, long timeo)
1212 	__releases(&unix_sk(other)->lock)
1213 {
1214 	struct unix_sock *u = unix_sk(other);
1215 	int sched;
1216 	DEFINE_WAIT(wait);
1217 
1218 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1219 
1220 	sched = !sock_flag(other, SOCK_DEAD) &&
1221 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1222 		unix_recvq_full(other);
1223 
1224 	unix_state_unlock(other);
1225 
1226 	if (sched)
1227 		timeo = schedule_timeout(timeo);
1228 
1229 	finish_wait(&u->peer_wait, &wait);
1230 	return timeo;
1231 }
1232 
1233 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1234 			       int addr_len, int flags)
1235 {
1236 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1237 	struct sock *sk = sock->sk;
1238 	struct net *net = sock_net(sk);
1239 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1240 	struct sock *newsk = NULL;
1241 	struct sock *other = NULL;
1242 	struct sk_buff *skb = NULL;
1243 	unsigned int hash;
1244 	int st;
1245 	int err;
1246 	long timeo;
1247 
1248 	err = unix_mkname(sunaddr, addr_len, &hash);
1249 	if (err < 0)
1250 		goto out;
1251 	addr_len = err;
1252 
1253 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1254 	    (err = unix_autobind(sock)) != 0)
1255 		goto out;
1256 
1257 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1258 
1259 	/* First of all, allocate resources.
1260 	   If we did this after the state was locked,
1261 	   we would have to recheck everything again in any case.
1262 	 */
1263 
1264 	err = -ENOMEM;
1265 
1266 	/* create new sock for complete connection */
1267 	newsk = unix_create1(sock_net(sk), NULL, 0);
1268 	if (newsk == NULL)
1269 		goto out;
1270 
1271 	/* Allocate skb for sending to listening sock */
1272 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1273 	if (skb == NULL)
1274 		goto out;
1275 
1276 restart:
1277 	/*  Find listening sock. */
1278 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1279 	if (!other)
1280 		goto out;
1281 
1282 	/* Latch state of peer */
1283 	unix_state_lock(other);
1284 
1285 	/* Apparently VFS overslept socket death. Retry. */
1286 	if (sock_flag(other, SOCK_DEAD)) {
1287 		unix_state_unlock(other);
1288 		sock_put(other);
1289 		goto restart;
1290 	}
1291 
1292 	err = -ECONNREFUSED;
1293 	if (other->sk_state != TCP_LISTEN)
1294 		goto out_unlock;
1295 	if (other->sk_shutdown & RCV_SHUTDOWN)
1296 		goto out_unlock;
1297 
1298 	if (unix_recvq_full(other)) {
1299 		err = -EAGAIN;
1300 		if (!timeo)
1301 			goto out_unlock;
1302 
1303 		timeo = unix_wait_for_peer(other, timeo);
1304 
1305 		err = sock_intr_errno(timeo);
1306 		if (signal_pending(current))
1307 			goto out;
1308 		sock_put(other);
1309 		goto restart;
1310 	}
1311 
1312 	/* Latch our state.
1313 
1314 	   This is a tricky place. We need to grab our state lock and cannot
1315 	   drop the lock on the peer. It is dangerous because deadlock is
1316 	   possible. The connect-to-self case and simultaneous
1317 	   connect attempts are eliminated by checking socket
1318 	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1319 	   catch that before attempting to grab the lock.
1320 
1321 	   Well, and we have to recheck the state after the socket is locked.
1322 	 */
1323 	st = sk->sk_state;
1324 
1325 	switch (st) {
1326 	case TCP_CLOSE:
1327 		/* This is ok... continue with connect */
1328 		break;
1329 	case TCP_ESTABLISHED:
1330 		/* Socket is already connected */
1331 		err = -EISCONN;
1332 		goto out_unlock;
1333 	default:
1334 		err = -EINVAL;
1335 		goto out_unlock;
1336 	}
1337 
1338 	unix_state_lock_nested(sk);
1339 
1340 	if (sk->sk_state != st) {
1341 		unix_state_unlock(sk);
1342 		unix_state_unlock(other);
1343 		sock_put(other);
1344 		goto restart;
1345 	}
1346 
1347 	err = security_unix_stream_connect(sk, other, newsk);
1348 	if (err) {
1349 		unix_state_unlock(sk);
1350 		goto out_unlock;
1351 	}
1352 
1353 	/* The way is open! Quickly set all the necessary fields... */
1354 
1355 	sock_hold(sk);
1356 	unix_peer(newsk)	= sk;
1357 	newsk->sk_state		= TCP_ESTABLISHED;
1358 	newsk->sk_type		= sk->sk_type;
1359 	init_peercred(newsk);
1360 	newu = unix_sk(newsk);
1361 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1362 	otheru = unix_sk(other);
1363 
1364 	/* copy address information from listening to new sock
1365 	 *
1366 	 * The contents of *(otheru->addr) and otheru->path
1367 	 * are seen fully set up here, since we have found
1368 	 * otheru in hash under unix_table_lock.  Insertion
1369 	 * into the hash chain we'd found it in had been done
1370 	 * in an earlier critical area protected by unix_table_lock,
1371 	 * the same one where we'd set *(otheru->addr) contents,
1372 	 * as well as otheru->path and otheru->addr itself.
1373 	 *
1374 	 * Using smp_store_release() here to set newu->addr
1375 	 * is enough to make those stores, as well as stores
1376 	 * to newu->path visible to anyone who gets newu->addr
1377 	 * by smp_load_acquire().  IOW, the same guarantees
1378 	 * as for unix_sock instances bound in unix_bind() or
1379 	 * in unix_autobind().
1380 	 */
1381 	if (otheru->path.dentry) {
1382 		path_get(&otheru->path);
1383 		newu->path = otheru->path;
1384 	}
1385 	refcount_inc(&otheru->addr->refcnt);
1386 	smp_store_release(&newu->addr, otheru->addr);
1387 
1388 	/* Set credentials */
1389 	copy_peercred(sk, other);
1390 
1391 	sock->state	= SS_CONNECTED;
1392 	sk->sk_state	= TCP_ESTABLISHED;
1393 	sock_hold(newsk);
1394 
1395 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1396 	unix_peer(sk)	= newsk;
1397 
1398 	unix_state_unlock(sk);
1399 
1400 	/* queue the skb and send info to listening sock */
1401 	spin_lock(&other->sk_receive_queue.lock);
1402 	__skb_queue_tail(&other->sk_receive_queue, skb);
1403 	spin_unlock(&other->sk_receive_queue.lock);
1404 	unix_state_unlock(other);
1405 	other->sk_data_ready(other);
1406 	sock_put(other);
1407 	return 0;
1408 
1409 out_unlock:
1410 	if (other)
1411 		unix_state_unlock(other);
1412 
1413 out:
1414 	kfree_skb(skb);
1415 	if (newsk)
1416 		unix_release_sock(newsk, 0);
1417 	if (other)
1418 		sock_put(other);
1419 	return err;
1420 }
1421 
1422 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1423 {
1424 	struct sock *ska = socka->sk, *skb = sockb->sk;
1425 
1426 	/* Join our sockets back to back */
1427 	sock_hold(ska);
1428 	sock_hold(skb);
1429 	unix_peer(ska) = skb;
1430 	unix_peer(skb) = ska;
1431 	init_peercred(ska);
1432 	init_peercred(skb);
1433 
1434 	if (ska->sk_type != SOCK_DGRAM) {
1435 		ska->sk_state = TCP_ESTABLISHED;
1436 		skb->sk_state = TCP_ESTABLISHED;
1437 		socka->state  = SS_CONNECTED;
1438 		sockb->state  = SS_CONNECTED;
1439 	}
1440 	return 0;
1441 }
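
/* Userspace sketch (illustrative): this implements socketpair(2) for
 * AF_UNIX, e.g.
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * Both fds come back already connected, and since init_peercred() ran
 * on each sock, SO_PEERCRED on either fd reports the creating task's
 * pid and effective uid/gid.
 */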
1442 
1443 static void unix_sock_inherit_flags(const struct socket *old,
1444 				    struct socket *new)
1445 {
1446 	if (test_bit(SOCK_PASSCRED, &old->flags))
1447 		set_bit(SOCK_PASSCRED, &new->flags);
1448 	if (test_bit(SOCK_PASSSEC, &old->flags))
1449 		set_bit(SOCK_PASSSEC, &new->flags);
1450 }
1451 
1452 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1453 		       bool kern)
1454 {
1455 	struct sock *sk = sock->sk;
1456 	struct sock *tsk;
1457 	struct sk_buff *skb;
1458 	int err;
1459 
1460 	err = -EOPNOTSUPP;
1461 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1462 		goto out;
1463 
1464 	err = -EINVAL;
1465 	if (sk->sk_state != TCP_LISTEN)
1466 		goto out;
1467 
1468 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1469 	 * so no locks are necessary.
1470 	 */
1471 
1472 	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1473 	if (!skb) {
1474 		/* This means receive shutdown. */
1475 		if (err == 0)
1476 			err = -EINVAL;
1477 		goto out;
1478 	}
1479 
1480 	tsk = skb->sk;
1481 	skb_free_datagram(sk, skb);
1482 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1483 
1484 	/* attach accepted sock to socket */
1485 	unix_state_lock(tsk);
1486 	newsock->state = SS_CONNECTED;
1487 	unix_sock_inherit_flags(sock, newsock);
1488 	sock_graft(tsk, newsock);
1489 	unix_state_unlock(tsk);
1490 	return 0;
1491 
1492 out:
1493 	return err;
1494 }
1495 
1496 
1497 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1498 {
1499 	struct sock *sk = sock->sk;
1500 	struct unix_address *addr;
1501 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1502 	int err = 0;
1503 
1504 	if (peer) {
1505 		sk = unix_peer_get(sk);
1506 
1507 		err = -ENOTCONN;
1508 		if (!sk)
1509 			goto out;
1510 		err = 0;
1511 	} else {
1512 		sock_hold(sk);
1513 	}
1514 
1515 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1516 	if (!addr) {
1517 		sunaddr->sun_family = AF_UNIX;
1518 		sunaddr->sun_path[0] = 0;
1519 		err = sizeof(short);
1520 	} else {
1521 		err = addr->len;
1522 		memcpy(sunaddr, addr->name, addr->len);
1523 	}
1524 	sock_put(sk);
1525 out:
1526 	return err;
1527 }
1528 
1529 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1530 {
1531 	int err = 0;
1532 
1533 	UNIXCB(skb).pid  = get_pid(scm->pid);
1534 	UNIXCB(skb).uid = scm->creds.uid;
1535 	UNIXCB(skb).gid = scm->creds.gid;
1536 	UNIXCB(skb).fp = NULL;
1537 	unix_get_secdata(scm, skb);
1538 	if (scm->fp && send_fds)
1539 		err = unix_attach_fds(scm, skb);
1540 
1541 	skb->destructor = unix_destruct_scm;
1542 	return err;
1543 }
1544 
1545 static bool unix_passcred_enabled(const struct socket *sock,
1546 				  const struct sock *other)
1547 {
1548 	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1549 	       !other->sk_socket ||
1550 	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1551 }
1552 
1553 /*
1554  * Some apps rely on write() giving SCM_CREDENTIALS.
1555  * We include credentials if the source or destination socket
1556  * asserted SOCK_PASSCRED.
1557  */
1558 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1559 			    const struct sock *other)
1560 {
1561 	if (UNIXCB(skb).pid)
1562 		return;
1563 	if (unix_passcred_enabled(sock, other)) {
1564 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1565 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1566 	}
1567 }
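
/* Receiver-side sketch (illustrative): with
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
 *
 * every recvmsg() on fd yields a SOL_SOCKET/SCM_CREDENTIALS cmsg
 * carrying a struct ucred built from the UNIXCB pid/uid/gid filled in
 * here.
 */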
1568 
1569 static int maybe_init_creds(struct scm_cookie *scm,
1570 			    struct socket *socket,
1571 			    const struct sock *other)
1572 {
1573 	int err;
1574 	struct msghdr msg = { .msg_controllen = 0 };
1575 
1576 	err = scm_send(socket, &msg, scm, false);
1577 	if (err)
1578 		return err;
1579 
1580 	if (unix_passcred_enabled(socket, other)) {
1581 		scm->pid = get_pid(task_tgid(current));
1582 		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1583 	}
1584 	return err;
1585 }
1586 
1587 static bool unix_skb_scm_eq(struct sk_buff *skb,
1588 			    struct scm_cookie *scm)
1589 {
1590 	const struct unix_skb_parms *u = &UNIXCB(skb);
1591 
1592 	return u->pid == scm->pid &&
1593 	       uid_eq(u->uid, scm->creds.uid) &&
1594 	       gid_eq(u->gid, scm->creds.gid) &&
1595 	       unix_secdata_eq(scm, skb);
1596 }
1597 
1598 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1599 {
1600 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1601 	struct unix_sock *u = unix_sk(sk);
1602 
1603 	if (unlikely(fp && fp->count))
1604 		atomic_add(fp->count, &u->scm_stat.nr_fds);
1605 }
1606 
1607 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1608 {
1609 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1610 	struct unix_sock *u = unix_sk(sk);
1611 
1612 	if (unlikely(fp && fp->count))
1613 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1614 }
1615 
1616 /*
1617  *	Send AF_UNIX data.
1618  */
1619 
1620 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1621 			      size_t len)
1622 {
1623 	struct sock *sk = sock->sk;
1624 	struct net *net = sock_net(sk);
1625 	struct unix_sock *u = unix_sk(sk);
1626 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1627 	struct sock *other = NULL;
1628 	int namelen = 0; /* fake GCC: silence a maybe-uninitialized warning */
1629 	int err;
1630 	unsigned int hash;
1631 	struct sk_buff *skb;
1632 	long timeo;
1633 	struct scm_cookie scm;
1634 	int data_len = 0;
1635 	int sk_locked;
1636 
1637 	wait_for_unix_gc();
1638 	err = scm_send(sock, msg, &scm, false);
1639 	if (err < 0)
1640 		return err;
1641 
1642 	err = -EOPNOTSUPP;
1643 	if (msg->msg_flags&MSG_OOB)
1644 		goto out;
1645 
1646 	if (msg->msg_namelen) {
1647 		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1648 		if (err < 0)
1649 			goto out;
1650 		namelen = err;
1651 	} else {
1652 		sunaddr = NULL;
1653 		err = -ENOTCONN;
1654 		other = unix_peer_get(sk);
1655 		if (!other)
1656 			goto out;
1657 	}
1658 
1659 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1660 	    && (err = unix_autobind(sock)) != 0)
1661 		goto out;
1662 
1663 	err = -EMSGSIZE;
1664 	if (len > sk->sk_sndbuf - 32)
1665 		goto out;
1666 
1667 	if (len > SKB_MAX_ALLOC) {
1668 		data_len = min_t(size_t,
1669 				 len - SKB_MAX_ALLOC,
1670 				 MAX_SKB_FRAGS * PAGE_SIZE);
1671 		data_len = PAGE_ALIGN(data_len);
1672 
1673 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1674 	}
1675 
1676 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1677 				   msg->msg_flags & MSG_DONTWAIT, &err,
1678 				   PAGE_ALLOC_COSTLY_ORDER);
1679 	if (skb == NULL)
1680 		goto out;
1681 
1682 	err = unix_scm_to_skb(&scm, skb, true);
1683 	if (err < 0)
1684 		goto out_free;
1685 
1686 	skb_put(skb, len - data_len);
1687 	skb->data_len = data_len;
1688 	skb->len = len;
1689 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1690 	if (err)
1691 		goto out_free;
1692 
1693 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1694 
1695 restart:
1696 	if (!other) {
1697 		err = -ECONNRESET;
1698 		if (sunaddr == NULL)
1699 			goto out_free;
1700 
1701 		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1702 					hash, &err);
1703 		if (other == NULL)
1704 			goto out_free;
1705 	}
1706 
1707 	if (sk_filter(other, skb) < 0) {
1708 		/* Toss the packet but do not return any error to the sender */
1709 		err = len;
1710 		goto out_free;
1711 	}
1712 
1713 	sk_locked = 0;
1714 	unix_state_lock(other);
1715 restart_locked:
1716 	err = -EPERM;
1717 	if (!unix_may_send(sk, other))
1718 		goto out_unlock;
1719 
1720 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
1721 		/*
1722 		 *	Check with 1003.1g - what should a
1723 		 *	datagram error do here?
1724 		 */
1725 		unix_state_unlock(other);
1726 		sock_put(other);
1727 
1728 		if (!sk_locked)
1729 			unix_state_lock(sk);
1730 
1731 		err = 0;
1732 		if (unix_peer(sk) == other) {
1733 			unix_peer(sk) = NULL;
1734 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1735 
1736 			unix_state_unlock(sk);
1737 
1738 			unix_dgram_disconnected(sk, other);
1739 			sock_put(other);
1740 			err = -ECONNREFUSED;
1741 		} else {
1742 			unix_state_unlock(sk);
1743 		}
1744 
1745 		other = NULL;
1746 		if (err)
1747 			goto out_free;
1748 		goto restart;
1749 	}
1750 
1751 	err = -EPIPE;
1752 	if (other->sk_shutdown & RCV_SHUTDOWN)
1753 		goto out_unlock;
1754 
1755 	if (sk->sk_type != SOCK_SEQPACKET) {
1756 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1757 		if (err)
1758 			goto out_unlock;
1759 	}
1760 
1761 	/* other == sk && unix_peer(other) != sk if
1762 	 * - unix_peer(sk) == NULL, destination address bound to sk
1763 	 * - unix_peer(sk) == sk at the time of the lookup, but disconnected before the lock
1764 	 */
1765 	if (other != sk &&
1766 	    unlikely(unix_peer(other) != sk &&
1767 	    unix_recvq_full_lockless(other))) {
1768 		if (timeo) {
1769 			timeo = unix_wait_for_peer(other, timeo);
1770 
1771 			err = sock_intr_errno(timeo);
1772 			if (signal_pending(current))
1773 				goto out_free;
1774 
1775 			goto restart;
1776 		}
1777 
1778 		if (!sk_locked) {
1779 			unix_state_unlock(other);
1780 			unix_state_double_lock(sk, other);
1781 		}
1782 
1783 		if (unix_peer(sk) != other ||
1784 		    unix_dgram_peer_wake_me(sk, other)) {
1785 			err = -EAGAIN;
1786 			sk_locked = 1;
1787 			goto out_unlock;
1788 		}
1789 
1790 		if (!sk_locked) {
1791 			sk_locked = 1;
1792 			goto restart_locked;
1793 		}
1794 	}
1795 
1796 	if (unlikely(sk_locked))
1797 		unix_state_unlock(sk);
1798 
1799 	if (sock_flag(other, SOCK_RCVTSTAMP))
1800 		__net_timestamp(skb);
1801 	maybe_add_creds(skb, sock, other);
1802 	scm_stat_add(other, skb);
1803 	skb_queue_tail(&other->sk_receive_queue, skb);
1804 	unix_state_unlock(other);
1805 	other->sk_data_ready(other);
1806 	sock_put(other);
1807 	scm_destroy(&scm);
1808 	return len;
1809 
1810 out_unlock:
1811 	if (sk_locked)
1812 		unix_state_unlock(sk);
1813 	unix_state_unlock(other);
1814 out_free:
1815 	kfree_skb(skb);
1816 out:
1817 	if (other)
1818 		sock_put(other);
1819 	scm_destroy(&scm);
1820 	return err;
1821 }
1822 
1823 /* We use paged skbs for stream sockets, and limit occupancy to 32768
1824  * bytes, with a minimum of a full page.
1825  */
1826 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
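
/* With 4 KiB pages get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ is
 * exactly 32768 bytes; with 64 KiB pages get_order(32768) == 0 and the
 * limit rounds up to one full page, hence the "minimum of a full page"
 * above.
 */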
1827 
1828 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1829 			       size_t len)
1830 {
1831 	struct sock *sk = sock->sk;
1832 	struct sock *other = NULL;
1833 	int err, size;
1834 	struct sk_buff *skb;
1835 	int sent = 0;
1836 	struct scm_cookie scm;
1837 	bool fds_sent = false;
1838 	int data_len;
1839 
1840 	wait_for_unix_gc();
1841 	err = scm_send(sock, msg, &scm, false);
1842 	if (err < 0)
1843 		return err;
1844 
1845 	err = -EOPNOTSUPP;
1846 	if (msg->msg_flags&MSG_OOB)
1847 		goto out_err;
1848 
1849 	if (msg->msg_namelen) {
1850 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1851 		goto out_err;
1852 	} else {
1853 		err = -ENOTCONN;
1854 		other = unix_peer(sk);
1855 		if (!other)
1856 			goto out_err;
1857 	}
1858 
1859 	if (sk->sk_shutdown & SEND_SHUTDOWN)
1860 		goto pipe_err;
1861 
1862 	while (sent < len) {
1863 		size = len - sent;
1864 
1865 		/* Keep two messages in the pipe so it schedules better */
1866 		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1867 
1868 		/* allow fallback to order-0 allocations */
1869 		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1870 
1871 		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1872 
1873 		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1874 
1875 		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1876 					   msg->msg_flags & MSG_DONTWAIT, &err,
1877 					   get_order(UNIX_SKB_FRAGS_SZ));
1878 		if (!skb)
1879 			goto out_err;
1880 
1881 		/* Only send the fds in the first buffer */
1882 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
1883 		if (err < 0) {
1884 			kfree_skb(skb);
1885 			goto out_err;
1886 		}
1887 		fds_sent = true;
1888 
1889 		skb_put(skb, size - data_len);
1890 		skb->data_len = data_len;
1891 		skb->len = size;
1892 		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1893 		if (err) {
1894 			kfree_skb(skb);
1895 			goto out_err;
1896 		}
1897 
1898 		unix_state_lock(other);
1899 
1900 		if (sock_flag(other, SOCK_DEAD) ||
1901 		    (other->sk_shutdown & RCV_SHUTDOWN))
1902 			goto pipe_err_free;
1903 
1904 		maybe_add_creds(skb, sock, other);
1905 		scm_stat_add(other, skb);
1906 		skb_queue_tail(&other->sk_receive_queue, skb);
1907 		unix_state_unlock(other);
1908 		other->sk_data_ready(other);
1909 		sent += size;
1910 	}
1911 
1912 	scm_destroy(&scm);
1913 
1914 	return sent;
1915 
1916 pipe_err_free:
1917 	unix_state_unlock(other);
1918 	kfree_skb(skb);
1919 pipe_err:
1920 	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1921 		send_sig(SIGPIPE, current, 0);
1922 	err = -EPIPE;
1923 out_err:
1924 	scm_destroy(&scm);
1925 	return sent ? : err;
1926 }
1927 
1928 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1929 				    int offset, size_t size, int flags)
1930 {
1931 	int err;
1932 	bool send_sigpipe = false;
1933 	bool init_scm = true;
1934 	struct scm_cookie scm;
1935 	struct sock *other, *sk = socket->sk;
1936 	struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1937 
1938 	if (flags & MSG_OOB)
1939 		return -EOPNOTSUPP;
1940 
1941 	other = unix_peer(sk);
1942 	if (!other || sk->sk_state != TCP_ESTABLISHED)
1943 		return -ENOTCONN;
1944 
1945 	if (false) {
1946 alloc_skb:
1947 		unix_state_unlock(other);
1948 		mutex_unlock(&unix_sk(other)->iolock);
1949 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1950 					      &err, 0);
1951 		if (!newskb)
1952 			goto err;
1953 	}
1954 
1955 	/* We must acquire iolock because we modify skbs already present
1956 	 * in the sk_receive_queue and adjust skb->len.
1957 	 */
1958 	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
1959 	if (err) {
1960 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1961 		goto err;
1962 	}
1963 
1964 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1965 		err = -EPIPE;
1966 		send_sigpipe = true;
1967 		goto err_unlock;
1968 	}
1969 
1970 	unix_state_lock(other);
1971 
1972 	if (sock_flag(other, SOCK_DEAD) ||
1973 	    other->sk_shutdown & RCV_SHUTDOWN) {
1974 		err = -EPIPE;
1975 		send_sigpipe = true;
1976 		goto err_state_unlock;
1977 	}
1978 
1979 	if (init_scm) {
1980 		err = maybe_init_creds(&scm, socket, other);
1981 		if (err)
1982 			goto err_state_unlock;
1983 		init_scm = false;
1984 	}
1985 
1986 	skb = skb_peek_tail(&other->sk_receive_queue);
1987 	if (tail && tail == skb) {
1988 		skb = newskb;
1989 	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1990 		if (newskb) {
1991 			skb = newskb;
1992 		} else {
1993 			tail = skb;
1994 			goto alloc_skb;
1995 		}
1996 	} else if (newskb) {
1997 		/* This is the fast path: an appendable skb already exists,
1998 		 * so the speculatively allocated newskb is not needed and
1999 		 * can be freed; consume_skb(NULL) would do no harm anyway.
2000 		 */
2001 		consume_skb(newskb);
2002 		newskb = NULL;
2003 	}
2004 
2005 	if (skb_append_pagefrags(skb, page, offset, size)) {
2006 		tail = skb;
2007 		goto alloc_skb;
2008 	}
2009 
2010 	skb->len += size;
2011 	skb->data_len += size;
2012 	skb->truesize += size;
2013 	refcount_add(size, &sk->sk_wmem_alloc);
2014 
2015 	if (newskb) {
2016 		err = unix_scm_to_skb(&scm, skb, false);
2017 		if (err)
2018 			goto err_state_unlock;
2019 		spin_lock(&other->sk_receive_queue.lock);
2020 		__skb_queue_tail(&other->sk_receive_queue, newskb);
2021 		spin_unlock(&other->sk_receive_queue.lock);
2022 	}
2023 
2024 	unix_state_unlock(other);
2025 	mutex_unlock(&unix_sk(other)->iolock);
2026 
2027 	other->sk_data_ready(other);
2028 	scm_destroy(&scm);
2029 	return size;
2030 
2031 err_state_unlock:
2032 	unix_state_unlock(other);
2033 err_unlock:
2034 	mutex_unlock(&unix_sk(other)->iolock);
2035 err:
2036 	kfree_skb(newskb);
2037 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2038 		send_sig(SIGPIPE, current, 0);
2039 	if (!init_scm)
2040 		scm_destroy(&scm);
2041 	return err;
2042 }
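/* Illustrative userspace sketch (assumed fd names, not from this file):
 * the sendpage path above is typically reached via splice()/sendfile()
 * on a connected AF_UNIX stream socket, e.g. moving pipe data into the
 * socket without an intermediate userspace copy:
 *
 *	ssize_t n = splice(pipe_rd, NULL, sock_fd, NULL, 65536, 0);
 */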
2043 
2044 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2045 				  size_t len)
2046 {
2047 	int err;
2048 	struct sock *sk = sock->sk;
2049 
2050 	err = sock_error(sk);
2051 	if (err)
2052 		return err;
2053 
2054 	if (sk->sk_state != TCP_ESTABLISHED)
2055 		return -ENOTCONN;
2056 
2057 	if (msg->msg_namelen)
2058 		msg->msg_namelen = 0;
2059 
2060 	return unix_dgram_sendmsg(sock, msg, len);
2061 }
2062 
2063 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2064 				  size_t size, int flags)
2065 {
2066 	struct sock *sk = sock->sk;
2067 
2068 	if (sk->sk_state != TCP_ESTABLISHED)
2069 		return -ENOTCONN;
2070 
2071 	return unix_dgram_recvmsg(sock, msg, size, flags);
2072 }
2073 
2074 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2075 {
2076 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2077 
2078 	if (addr) {
2079 		msg->msg_namelen = addr->len;
2080 		memcpy(msg->msg_name, addr->name, addr->len);
2081 	}
2082 }
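/* The smp_load_acquire() above pairs with the smp_store_release() that
 * publishes u->addr when the socket is bound (or autobound), so a
 * reader that observes a non-NULL addr also sees its len and name
 * fields fully initialised.
 */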
2083 
2084 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2085 			      size_t size, int flags)
2086 {
2087 	struct scm_cookie scm;
2088 	struct sock *sk = sock->sk;
2089 	struct unix_sock *u = unix_sk(sk);
2090 	struct sk_buff *skb, *last;
2091 	long timeo;
2092 	int skip;
2093 	int err;
2094 
2095 	err = -EOPNOTSUPP;
2096 	if (flags & MSG_OOB)
2097 		goto out;
2098 
2099 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2100 
2101 	do {
2102 		mutex_lock(&u->iolock);
2103 
2104 		skip = sk_peek_offset(sk, flags);
2105 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2106 					      &skip, &err, &last);
2107 		if (skb) {
2108 			if (!(flags & MSG_PEEK))
2109 				scm_stat_del(sk, skb);
2110 			break;
2111 		}
2112 
2113 		mutex_unlock(&u->iolock);
2114 
2115 		if (err != -EAGAIN)
2116 			break;
2117 	} while (timeo &&
2118 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2119 					      &err, &timeo, last));
2120 
2121 	if (!skb) { /* implies iolock unlocked */
2122 		unix_state_lock(sk);
2123 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2124 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2125 		    (sk->sk_shutdown & RCV_SHUTDOWN))
2126 			err = 0;
2127 		unix_state_unlock(sk);
2128 		goto out;
2129 	}
2130 
2131 	if (wq_has_sleeper(&u->peer_wait))
2132 		wake_up_interruptible_sync_poll(&u->peer_wait,
2133 						EPOLLOUT | EPOLLWRNORM |
2134 						EPOLLWRBAND);
2135 
2136 	if (msg->msg_name)
2137 		unix_copy_addr(msg, skb->sk);
2138 
2139 	if (size > skb->len - skip)
2140 		size = skb->len - skip;
2141 	else if (size < skb->len - skip)
2142 		msg->msg_flags |= MSG_TRUNC;
2143 
2144 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2145 	if (err)
2146 		goto out_free;
2147 
2148 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2149 		__sock_recv_timestamp(msg, sk, skb);
2150 
2151 	memset(&scm, 0, sizeof(scm));
2152 
2153 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2154 	unix_set_secdata(&scm, skb);
2155 
2156 	if (!(flags & MSG_PEEK)) {
2157 		if (UNIXCB(skb).fp)
2158 			unix_detach_fds(&scm, skb);
2159 
2160 		sk_peek_offset_bwd(sk, skb->len);
2161 	} else {
2162 		/* It is questionable what to do with fds on PEEK:
2163 		 *  - not return them at all: simple, but too
2164 		 *    restrictive 8)
2165 		 *  - return them, but not return them again on read
2166 		 *    (the old strategy, apparently wrong)
2167 		 *  - clone them (chosen here, as the most universal
2168 		 *    solution)
2169 		 *
2170 		 * POSIX 1003.1g does not actually define this clearly
2171 		 * at all -- but then POSIX 1003.1g doesn't define a
2172 		 * lot of things clearly!
2173 		 */
2174 
2175 		sk_peek_offset_fwd(sk, size);
2176 
2177 		if (UNIXCB(skb).fp)
2178 			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2179 	}
2180 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2181 
2182 	scm_recv(sock, msg, &scm, flags);
2183 
2184 out_free:
2185 	skb_free_datagram(sk, skb);
2186 	mutex_unlock(&u->iolock);
2187 out:
2188 	return err;
2189 }
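/* Illustrative userspace sketch (assumed names, not from this file):
 * because the MSG_PEEK branch above clones passed fds via scm_fp_dup(),
 * a peeking reader gets working duplicates, and the same SCM_RIGHTS
 * payload is delivered again by the following non-peek read:
 *
 *	char buf[128], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	recvmsg(fd, &mh, MSG_PEEK);	// fds duplicated here
 *	recvmsg(fd, &mh, 0);		// and delivered again here
 */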
2190 
2191 /*
2192  *	Sleep until more data has arrived. But check for races.
2193  */
2194 static long unix_stream_data_wait(struct sock *sk, long timeo,
2195 				  struct sk_buff *last, unsigned int last_len,
2196 				  bool freezable)
2197 {
2198 	struct sk_buff *tail;
2199 	DEFINE_WAIT(wait);
2200 
2201 	unix_state_lock(sk);
2202 
2203 	for (;;) {
2204 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2205 
2206 		tail = skb_peek_tail(&sk->sk_receive_queue);
2207 		if (tail != last ||
2208 		    (tail && tail->len != last_len) ||
2209 		    sk->sk_err ||
2210 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2211 		    signal_pending(current) ||
2212 		    !timeo)
2213 			break;
2214 
2215 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2216 		unix_state_unlock(sk);
2217 		if (freezable)
2218 			timeo = freezable_schedule_timeout(timeo);
2219 		else
2220 			timeo = schedule_timeout(timeo);
2221 		unix_state_lock(sk);
2222 
2223 		if (sock_flag(sk, SOCK_DEAD))
2224 			break;
2225 
2226 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2227 	}
2228 
2229 	finish_wait(sk_sleep(sk), &wait);
2230 	unix_state_unlock(sk);
2231 	return timeo;
2232 }
2233 
2234 static unsigned int unix_skb_len(const struct sk_buff *skb)
2235 {
2236 	return skb->len - UNIXCB(skb).consumed;
2237 }
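/* UNIXCB(skb).consumed records how much of a stream skb earlier reads
 * have already copied out. For example, once 1000 bytes of a 4096-byte
 * skb have been read, consumed == 1000 and unix_skb_len() returns 3096,
 * so the skb stays on the receive queue until fully drained.
 */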
2238 
2239 struct unix_stream_read_state {
2240 	int (*recv_actor)(struct sk_buff *, int, int,
2241 			  struct unix_stream_read_state *);
2242 	struct socket *socket;
2243 	struct msghdr *msg;
2244 	struct pipe_inode_info *pipe;
2245 	size_t size;
2246 	int flags;
2247 	unsigned int splice_flags;
2248 };
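/* unix_stream_read_generic() below implements the stream receive loop
 * once; recvmsg() and splice_read() differ only in the recv_actor they
 * plug in (copying into a msghdr vs. feeding a pipe), as wired up
 * after the generic reader.
 */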
2249 
2250 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2251 				    bool freezable)
2252 {
2253 	struct scm_cookie scm;
2254 	struct socket *sock = state->socket;
2255 	struct sock *sk = sock->sk;
2256 	struct unix_sock *u = unix_sk(sk);
2257 	int copied = 0;
2258 	int flags = state->flags;
2259 	int noblock = flags & MSG_DONTWAIT;
2260 	bool check_creds = false;
2261 	int target;
2262 	int err = 0;
2263 	long timeo;
2264 	int skip;
2265 	size_t size = state->size;
2266 	unsigned int last_len;
2267 
2268 	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2269 		err = -EINVAL;
2270 		goto out;
2271 	}
2272 
2273 	if (unlikely(flags & MSG_OOB)) {
2274 		err = -EOPNOTSUPP;
2275 		goto out;
2276 	}
2277 
2278 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2279 	timeo = sock_rcvtimeo(sk, noblock);
2280 
2281 	memset(&scm, 0, sizeof(scm));
2282 
2283 	/* Lock the socket to prevent queue disordering
2284 	 * while we sleep copying data to the msg.
2285 	 */
2286 	mutex_lock(&u->iolock);
2287 
2288 	skip = max(sk_peek_offset(sk, flags), 0);
2289 
2290 	do {
2291 		int chunk;
2292 		bool drop_skb;
2293 		struct sk_buff *skb, *last;
2294 
2295 redo:
2296 		unix_state_lock(sk);
2297 		if (sock_flag(sk, SOCK_DEAD)) {
2298 			err = -ECONNRESET;
2299 			goto unlock;
2300 		}
2301 		last = skb = skb_peek(&sk->sk_receive_queue);
2302 		last_len = last ? last->len : 0;
2303 again:
2304 		if (skb == NULL) {
2305 			if (copied >= target)
2306 				goto unlock;
2307 
2308 			/*
2309 			 *	POSIX 1003.1g mandates this order.
2310 			 */
2311 
2312 			err = sock_error(sk);
2313 			if (err)
2314 				goto unlock;
2315 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2316 				goto unlock;
2317 
2318 			unix_state_unlock(sk);
2319 			if (!timeo) {
2320 				err = -EAGAIN;
2321 				break;
2322 			}
2323 
2324 			mutex_unlock(&u->iolock);
2325 
2326 			timeo = unix_stream_data_wait(sk, timeo, last,
2327 						      last_len, freezable);
2328 
2329 			if (signal_pending(current)) {
2330 				err = sock_intr_errno(timeo);
2331 				scm_destroy(&scm);
2332 				goto out;
2333 			}
2334 
2335 			mutex_lock(&u->iolock);
2336 			goto redo;
2337 unlock:
2338 			unix_state_unlock(sk);
2339 			break;
2340 		}
2341 
2342 		while (skip >= unix_skb_len(skb)) {
2343 			skip -= unix_skb_len(skb);
2344 			last = skb;
2345 			last_len = skb->len;
2346 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2347 			if (!skb)
2348 				goto again;
2349 		}
2350 
2351 		unix_state_unlock(sk);
2352 
2353 		if (check_creds) {
2354 			/* Never glue messages from different writers */
2355 			if (!unix_skb_scm_eq(skb, &scm))
2356 				break;
2357 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2358 			/* Copy credentials */
2359 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2360 			unix_set_secdata(&scm, skb);
2361 			check_creds = true;
2362 		}
2363 
2364 		/* Copy address just once */
2365 		if (state->msg && state->msg->msg_name)
2366 			unix_copy_addr(state->msg, skb->sk);
2371 
2372 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2373 		skb_get(skb);
2374 		chunk = state->recv_actor(skb, skip, chunk, state);
2375 		drop_skb = !unix_skb_len(skb);
2376 		/* skb is only safe to use if !drop_skb */
2377 		consume_skb(skb);
2378 		if (chunk < 0) {
2379 			if (copied == 0)
2380 				copied = -EFAULT;
2381 			break;
2382 		}
2383 		copied += chunk;
2384 		size -= chunk;
2385 
2386 		if (drop_skb) {
2387 			/* The skb was consumed by a concurrent reader;
2388 			 * we should not expect anything more from it
2389 			 * and must assume it is invalid -- we can be
2390 			 * sure it was dropped from the socket queue.
2391 			 *
2392 			 * Report a short read.
2393 			 */
2394 			err = 0;
2395 			break;
2396 		}
2397 
2398 		/* Mark read part of skb as used */
2399 		if (!(flags & MSG_PEEK)) {
2400 			UNIXCB(skb).consumed += chunk;
2401 
2402 			sk_peek_offset_bwd(sk, chunk);
2403 
2404 			if (UNIXCB(skb).fp) {
2405 				scm_stat_del(sk, skb);
2406 				unix_detach_fds(&scm, skb);
2407 			}
2408 
2409 			if (unix_skb_len(skb))
2410 				break;
2411 
2412 			skb_unlink(skb, &sk->sk_receive_queue);
2413 			consume_skb(skb);
2414 
2415 			if (scm.fp)
2416 				break;
2417 		} else {
2418 			/* It is questionable, see note in unix_dgram_recvmsg.
2419 			 */
2420 			if (UNIXCB(skb).fp)
2421 				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2422 
2423 			sk_peek_offset_fwd(sk, chunk);
2424 
2425 			if (UNIXCB(skb).fp)
2426 				break;
2427 
2428 			skip = 0;
2429 			last = skb;
2430 			last_len = skb->len;
2431 			unix_state_lock(sk);
2432 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2433 			if (skb)
2434 				goto again;
2435 			unix_state_unlock(sk);
2436 			break;
2437 		}
2438 	} while (size);
2439 
2440 	mutex_unlock(&u->iolock);
2441 	if (state->msg)
2442 		scm_recv(sock, state->msg, &scm, flags);
2443 	else
2444 		scm_destroy(&scm);
2445 out:
2446 	return copied ? : err;
2447 }
2448 
2449 static int unix_stream_read_actor(struct sk_buff *skb,
2450 				  int skip, int chunk,
2451 				  struct unix_stream_read_state *state)
2452 {
2453 	int ret;
2454 
2455 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2456 				    state->msg, chunk);
2457 	return ret ?: chunk;
2458 }
2459 
2460 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2461 			       size_t size, int flags)
2462 {
2463 	struct unix_stream_read_state state = {
2464 		.recv_actor = unix_stream_read_actor,
2465 		.socket = sock,
2466 		.msg = msg,
2467 		.size = size,
2468 		.flags = flags
2469 	};
2470 
2471 	return unix_stream_read_generic(&state, true);
2472 }
2473 
2474 static int unix_stream_splice_actor(struct sk_buff *skb,
2475 				    int skip, int chunk,
2476 				    struct unix_stream_read_state *state)
2477 {
2478 	return skb_splice_bits(skb, state->socket->sk,
2479 			       UNIXCB(skb).consumed + skip,
2480 			       state->pipe, chunk, state->splice_flags);
2481 }
2482 
2483 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2484 				       struct pipe_inode_info *pipe,
2485 				       size_t size, unsigned int flags)
2486 {
2487 	struct unix_stream_read_state state = {
2488 		.recv_actor = unix_stream_splice_actor,
2489 		.socket = sock,
2490 		.pipe = pipe,
2491 		.size = size,
2492 		.splice_flags = flags,
2493 	};
2494 
2495 	if (unlikely(*ppos))
2496 		return -ESPIPE;
2497 
2498 	if (sock->file->f_flags & O_NONBLOCK ||
2499 	    flags & SPLICE_F_NONBLOCK)
2500 		state.flags = MSG_DONTWAIT;
2501 
2502 	return unix_stream_read_generic(&state, false);
2503 }
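/* Illustrative userspace sketch (assumed fd names, not from this file):
 * splicing queued stream data straight into a pipe, with
 * SPLICE_F_NONBLOCK mapped to MSG_DONTWAIT by the function above:
 *
 *	ssize_t n = splice(sock_fd, NULL, pipe_wr, NULL, 4096,
 *			   SPLICE_F_NONBLOCK);
 */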
2504 
2505 static int unix_shutdown(struct socket *sock, int mode)
2506 {
2507 	struct sock *sk = sock->sk;
2508 	struct sock *other;
2509 
2510 	if (mode < SHUT_RD || mode > SHUT_RDWR)
2511 		return -EINVAL;
2512 	/* This maps:
2513 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2514 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2515 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2516 	 */
2517 	++mode;
2518 
2519 	unix_state_lock(sk);
2520 	sk->sk_shutdown |= mode;
2521 	other = unix_peer(sk);
2522 	if (other)
2523 		sock_hold(other);
2524 	unix_state_unlock(sk);
2525 	sk->sk_state_change(sk);
2526 
2527 	if (other &&
2528 	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2529 
2530 		int peer_mode = 0;
2531 
2532 		if (mode & RCV_SHUTDOWN)
2533 			peer_mode |= SEND_SHUTDOWN;
2534 		if (mode & SEND_SHUTDOWN)
2535 			peer_mode |= RCV_SHUTDOWN;
2536 		unix_state_lock(other);
2537 		other->sk_shutdown |= peer_mode;
2538 		unix_state_unlock(other);
2539 		other->sk_state_change(other);
2540 		if (peer_mode == SHUTDOWN_MASK)
2541 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2542 		else if (peer_mode & RCV_SHUTDOWN)
2543 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2544 	}
2545 	if (other)
2546 		sock_put(other);
2547 
2548 	return 0;
2549 }
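/* Illustrative effect (standard shutdown() semantics): after
 *
 *	shutdown(fd, SHUT_WR);
 *
 * SHUT_WR (1) is mapped to SEND_SHUTDOWN (2) on this socket, and for
 * stream and seqpacket sockets RCV_SHUTDOWN is mirrored onto the peer,
 * whose readers are woken with POLL_IN so they can observe EOF.
 */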
2550 
2551 long unix_inq_len(struct sock *sk)
2552 {
2553 	struct sk_buff *skb;
2554 	long amount = 0;
2555 
2556 	if (sk->sk_state == TCP_LISTEN)
2557 		return -EINVAL;
2558 
2559 	spin_lock(&sk->sk_receive_queue.lock);
2560 	if (sk->sk_type == SOCK_STREAM ||
2561 	    sk->sk_type == SOCK_SEQPACKET) {
2562 		skb_queue_walk(&sk->sk_receive_queue, skb)
2563 			amount += unix_skb_len(skb);
2564 	} else {
2565 		skb = skb_peek(&sk->sk_receive_queue);
2566 		if (skb)
2567 			amount = skb->len;
2568 	}
2569 	spin_unlock(&sk->sk_receive_queue.lock);
2570 
2571 	return amount;
2572 }
2573 EXPORT_SYMBOL_GPL(unix_inq_len);
2574 
2575 long unix_outq_len(struct sock *sk)
2576 {
2577 	return sk_wmem_alloc_get(sk);
2578 }
2579 EXPORT_SYMBOL_GPL(unix_outq_len);
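/* These two helpers back the SIOCOUTQ/SIOCINQ ioctls handled below,
 * e.g. (illustrative userspace, assumed fd name):
 *
 *	int pending;
 *	ioctl(fd, SIOCINQ, &pending);	// unread bytes on the rcv queue
 */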
2580 
2581 static int unix_open_file(struct sock *sk)
2582 {
2583 	struct path path;
2584 	struct file *f;
2585 	int fd;
2586 
2587 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2588 		return -EPERM;
2589 
2590 	if (!smp_load_acquire(&unix_sk(sk)->addr))
2591 		return -ENOENT;
2592 
2593 	path = unix_sk(sk)->path;
2594 	if (!path.dentry)
2595 		return -ENOENT;
2596 
2597 	path_get(&path);
2598 
2599 	fd = get_unused_fd_flags(O_CLOEXEC);
2600 	if (fd < 0)
2601 		goto out;
2602 
2603 	f = dentry_open(&path, O_PATH, current_cred());
2604 	if (IS_ERR(f)) {
2605 		put_unused_fd(fd);
2606 		fd = PTR_ERR(f);
2607 		goto out;
2608 	}
2609 
2610 	fd_install(fd, f);
2611 out:
2612 	path_put(&path);
2613 
2614 	return fd;
2615 }
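/* SIOCUNIXFILE (dispatched below) hands the caller an O_CLOEXEC fd
 * opened O_PATH on the filesystem object the socket is bound to, e.g.
 * (illustrative userspace, assumed fd name):
 *
 *	int pathfd = ioctl(sock_fd, SIOCUNIXFILE);
 *
 * It requires CAP_NET_ADMIN and fails with -ENOENT for unbound or
 * abstract sockets, which have no path.
 */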
2616 
2617 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2618 {
2619 	struct sock *sk = sock->sk;
2620 	long amount = 0;
2621 	int err;
2622 
2623 	switch (cmd) {
2624 	case SIOCOUTQ:
2625 		amount = unix_outq_len(sk);
2626 		err = put_user(amount, (int __user *)arg);
2627 		break;
2628 	case SIOCINQ:
2629 		amount = unix_inq_len(sk);
2630 		if (amount < 0)
2631 			err = amount;
2632 		else
2633 			err = put_user(amount, (int __user *)arg);
2634 		break;
2635 	case SIOCUNIXFILE:
2636 		err = unix_open_file(sk);
2637 		break;
2638 	default:
2639 		err = -ENOIOCTLCMD;
2640 		break;
2641 	}
2642 	return err;
2643 }
2644 
2645 #ifdef CONFIG_COMPAT
2646 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2647 {
2648 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
2649 }
2650 #endif
2651 
2652 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2653 {
2654 	struct sock *sk = sock->sk;
2655 	__poll_t mask;
2656 
2657 	sock_poll_wait(file, sock, wait);
2658 	mask = 0;
2659 
2660 	/* exceptional events? */
2661 	if (sk->sk_err)
2662 		mask |= EPOLLERR;
2663 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2664 		mask |= EPOLLHUP;
2665 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2666 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2667 
2668 	/* readable? */
2669 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2670 		mask |= EPOLLIN | EPOLLRDNORM;
2671 
2672 	/* Connection-based sockets need to check for termination and startup */
2673 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2674 	    sk->sk_state == TCP_CLOSE)
2675 		mask |= EPOLLHUP;
2676 
2677 	/*
2678 	 * We also report the socket as writable when the other side has
2679 	 * shut down the connection. This prevents stuck sockets.
2680 	 */
2681 	if (unix_writable(sk))
2682 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2683 
2684 	return mask;
2685 }
2686 
2687 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
2688 				    poll_table *wait)
2689 {
2690 	struct sock *sk = sock->sk, *other;
2691 	unsigned int writable;
2692 	__poll_t mask;
2693 
2694 	sock_poll_wait(file, sock, wait);
2695 	mask = 0;
2696 
2697 	/* exceptional events? */
2698 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
2699 		mask |= EPOLLERR |
2700 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
2701 
2702 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2703 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
2704 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2705 		mask |= EPOLLHUP;
2706 
2707 	/* readable? */
2708 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
2709 		mask |= EPOLLIN | EPOLLRDNORM;
2710 
2711 	/* Connection-based sockets need to check for termination and startup */
2712 	if (sk->sk_type == SOCK_SEQPACKET) {
2713 		if (sk->sk_state == TCP_CLOSE)
2714 			mask |= EPOLLHUP;
2715 		/* connection hasn't started yet? */
2716 		if (sk->sk_state == TCP_SYN_SENT)
2717 			return mask;
2718 	}
2719 
2720 	/* No write status requested, avoid expensive OUT tests. */
2721 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
2722 		return mask;
2723 
2724 	writable = unix_writable(sk);
2725 	if (writable) {
2726 		unix_state_lock(sk);
2727 
2728 		other = unix_peer(sk);
2729 		if (other && unix_peer(other) != sk &&
2730 		    unix_recvq_full(other) &&
2731 		    unix_dgram_peer_wake_me(sk, other))
2732 			writable = 0;
2733 
2734 		unix_state_unlock(sk);
2735 	}
2736 
2737 	if (writable)
2738 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
2739 	else
2740 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2741 
2742 	return mask;
2743 }
2744 
2745 #ifdef CONFIG_PROC_FS
2746 
2747 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2748 
2749 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2750 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2751 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
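/* Illustrative encoding (assuming 64-bit longs and UNIX_HASH_BITS == 8):
 * BUCKET_SPACE == 54, so a seq_file position packs the hash bucket into
 * the top bits and a 1-based in-bucket offset into the low 54 bits,
 * e.g. set_bucket_offset(3, 2) == (3UL << 54) | 2.
 */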
2752 
2753 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2754 {
2755 	unsigned long offset = get_offset(*pos);
2756 	unsigned long bucket = get_bucket(*pos);
2757 	struct sock *sk;
2758 	unsigned long count = 0;
2759 
2760 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2761 		if (sock_net(sk) != seq_file_net(seq))
2762 			continue;
2763 		if (++count == offset)
2764 			break;
2765 	}
2766 
2767 	return sk;
2768 }
2769 
2770 static struct sock *unix_next_socket(struct seq_file *seq,
2771 				     struct sock *sk,
2772 				     loff_t *pos)
2773 {
2774 	unsigned long bucket;
2775 
2776 	while (sk > (struct sock *)SEQ_START_TOKEN) {
2777 		sk = sk_next(sk);
2778 		if (!sk)
2779 			goto next_bucket;
2780 		if (sock_net(sk) == seq_file_net(seq))
2781 			return sk;
2782 	}
2783 
2784 	do {
2785 		sk = unix_from_bucket(seq, pos);
2786 		if (sk)
2787 			return sk;
2788 
2789 next_bucket:
2790 		bucket = get_bucket(*pos) + 1;
2791 		*pos = set_bucket_offset(bucket, 1);
2792 	} while (bucket < ARRAY_SIZE(unix_socket_table));
2793 
2794 	return NULL;
2795 }
2796 
2797 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2798 	__acquires(unix_table_lock)
2799 {
2800 	spin_lock(&unix_table_lock);
2801 
2802 	if (!*pos)
2803 		return SEQ_START_TOKEN;
2804 
2805 	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2806 		return NULL;
2807 
2808 	return unix_next_socket(seq, NULL, pos);
2809 }
2810 
2811 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2812 {
2813 	++*pos;
2814 	return unix_next_socket(seq, v, pos);
2815 }
2816 
2817 static void unix_seq_stop(struct seq_file *seq, void *v)
2818 	__releases(unix_table_lock)
2819 {
2820 	spin_unlock(&unix_table_lock);
2821 }
2822 
2823 static int unix_seq_show(struct seq_file *seq, void *v)
2824 {
2825 
2826 	if (v == SEQ_START_TOKEN)
2827 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2828 			 "Inode Path\n");
2829 	else {
2830 		struct sock *s = v;
2831 		struct unix_sock *u = unix_sk(s);
2832 		unix_state_lock(s);
2833 
2834 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2835 			s,
2836 			refcount_read(&s->sk_refcnt),
2837 			0,
2838 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2839 			s->sk_type,
2840 			s->sk_socket ?
2841 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2842 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2843 			sock_i_ino(s));
2844 
2845 		if (u->addr) {	/* under unix_table_lock here */
2846 			int i, len;
2847 			seq_putc(seq, ' ');
2848 
2849 			i = 0;
2850 			len = u->addr->len - sizeof(short);
2851 			if (!UNIX_ABSTRACT(s))
2852 				len--;
2853 			else {
2854 				seq_putc(seq, '@');
2855 				i++;
2856 			}
2857 			for ( ; i < len; i++)
2858 				seq_putc(seq, u->addr->name->sun_path[i] ?:
2859 					 '@');
2860 		}
2861 		unix_state_unlock(s);
2862 		seq_putc(seq, '\n');
2863 	}
2864 
2865 	return 0;
2866 }
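/* A resulting /proc/net/unix line looks roughly like (illustrative
 * values only):
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 21401 @/tmp/.X11-unix/X0
 *
 * where Flags shows __SO_ACCEPTCON for listeners, Type/St are the
 * socket type and state, and abstract paths are prefixed with '@'.
 */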
2867 
2868 static const struct seq_operations unix_seq_ops = {
2869 	.start  = unix_seq_start,
2870 	.next   = unix_seq_next,
2871 	.stop   = unix_seq_stop,
2872 	.show   = unix_seq_show,
2873 };
2874 #endif
2875 
2876 static const struct net_proto_family unix_family_ops = {
2877 	.family = PF_UNIX,
2878 	.create = unix_create,
2879 	.owner	= THIS_MODULE,
2880 };
2881 
2882 
2883 static int __net_init unix_net_init(struct net *net)
2884 {
2885 	int error = -ENOMEM;
2886 
2887 	net->unx.sysctl_max_dgram_qlen = 10;
2888 	if (unix_sysctl_register(net))
2889 		goto out;
2890 
2891 #ifdef CONFIG_PROC_FS
2892 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
2893 			sizeof(struct seq_net_private))) {
2894 		unix_sysctl_unregister(net);
2895 		goto out;
2896 	}
2897 #endif
2898 	error = 0;
2899 out:
2900 	return error;
2901 }
2902 
2903 static void __net_exit unix_net_exit(struct net *net)
2904 {
2905 	unix_sysctl_unregister(net);
2906 	remove_proc_entry("unix", net->proc_net);
2907 }
2908 
2909 static struct pernet_operations unix_net_ops = {
2910 	.init = unix_net_init,
2911 	.exit = unix_net_exit,
2912 };
2913 
2914 static int __init af_unix_init(void)
2915 {
2916 	int rc = -1;
2917 
2918 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
2919 
2920 	rc = proto_register(&unix_proto, 1);
2921 	if (rc != 0) {
2922 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2923 		goto out;
2924 	}
2925 
2926 	sock_register(&unix_family_ops);
2927 	register_pernet_subsys(&unix_net_ops);
2928 out:
2929 	return rc;
2930 }
2931 
2932 static void __exit af_unix_exit(void)
2933 {
2934 	sock_unregister(PF_UNIX);
2935 	proto_unregister(&unix_proto);
2936 	unregister_pernet_subsys(&unix_net_ops);
2937 }
2938 
2939 /* Earlier than device_initcall() so that other drivers invoking
2940    request_module() don't end up in a loop when modprobe tries
2941    to use a UNIX socket. But later than subsys_initcall() because
2942    we depend on infrastructure initialised there. */
2943 fs_initcall(af_unix_init);
2944 module_exit(af_unix_exit);
2945 
2946 MODULE_LICENSE("GPL");
2947 MODULE_ALIAS_NETPROTO(PF_UNIX);
2948