xref: /openbmc/linux/net/unix/af_unix.c (revision de1f0a65)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET4:	Implementation of BSD Unix domain sockets.
4  *
5  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *
7  * Fixes:
8  *		Linus Torvalds	:	Assorted bug cures.
9  *		Niibe Yutaka	:	async I/O support.
10  *		Carsten Paeth	:	PF_UNIX check, address fixes.
11  *		Alan Cox	:	Limit size of allocated blocks.
12  *		Alan Cox	:	Fixed the stupid socketpair bug.
13  *		Alan Cox	:	BSD compatibility fine tuning.
14  *		Alan Cox	:	Fixed a bug in connect when interrupted.
15  *		Alan Cox	:	Sorted out a proper draft version of
16  *					file descriptor passing hacked up from
17  *					Mike Shaver's work.
18  *		Marty Leisner	:	Fixes to fd passing
19  *		Nick Nevin	:	recvmsg bugfix.
20  *		Alan Cox	:	Started proper garbage collector
21  *		Heiko Eißfeldt	:	Missing verify_area check
22  *		Alan Cox	:	Started POSIXisms
23  *		Andreas Schwab	:	Replace inode by dentry for proper
24  *					reference counting
25  *		Kirk Petersen	:	Made this a module
26  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
27  *					Lots of bug fixes.
28  *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
29  *					by the above two patches.
30  *	     Andrea Arcangeli	:	If possible we block in connect(2)
31  *					if the max backlog of the listen socket
32  *					has been reached. This won't break
33  *					old apps and it avoids hashing a huge
34  *					number of socks (for unix_gc()
35  *					performance reasons).
36  *					Security fix that limits the max
37  *					number of socks to 2*max_files and
38  *					the number of skbs queueable in the
39  *					dgram receiver.
40  *		Artur Skawina   :	Hash function optimizations
41  *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs were introduced 8)
42  *	      Malcolm Beattie   :	Set peercred for socketpair
43  *	     Michal Ostrowski   :       Module initialization cleanup.
44  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
45  *	     				the core infrastructure is doing that
46  *	     				for all net proto families now (2.5.69+)
47  *
48  * Known differences from reference BSD that was tested:
49  *
50  *	[TO FIX]
51  *	ECONNREFUSED is not returned from one end of a connected socket to the
52  *		other the moment one end closes.
53  *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
54  *		and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
55  *	[NOT TO FIX]
56  *	accept() returns a path name even if the connecting socket has closed
57  *		in the meantime (BSD loses the path and gives up).
58  *	accept() returns 0 length path for an unbound connector. BSD returns 16
59  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
61  *	BSD af_unix apparently has a connect that forgets to block properly.
62  *		(need to check this with the POSIX spec in detail)
63  *
64  * Differences from 2.0.0-11-... (ANK)
65  *	Bug fixes and improvements.
66  *		- client shutdown killed server socket.
67  *		- removed all useless cli/sti pairs.
68  *
69  *	Semantic changes/extensions.
70  *		- generic control message passing.
71  *		- SCM_CREDENTIALS control message.
72  *		- "Abstract" (not FS based) socket bindings.
73  *		  Abstract names are sequences of bytes (not zero terminated)
74  *		  starting with a zero byte, so this name space does not
75  *		  intersect with BSD names.
76  */
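
/* Editorial aside -- a hedged userspace sketch, not part of this file,
 * illustrating the two binding flavours described above.  Only the
 * standard <sys/socket.h>/<sys/un.h> interfaces are assumed; "fd" is a
 * hypothetical AF_UNIX socket descriptor.
 *
 *	struct sockaddr_un sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sun_family = AF_UNIX;
 *
 *	// Filesystem name: a NUL-terminated path in sun_path.
 *	strcpy(sa.sun_path, "/tmp/demo.sock");
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	// Abstract name: sun_path starts with a zero byte; the name is
 *	// the raw byte sequence, so its length must be passed explicitly.
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sun_family = AF_UNIX;
 *	memcpy(sa.sun_path, "\0demo", 5);
 *	bind(fd, (struct sockaddr *)&sa,
 *	     offsetof(struct sockaddr_un, sun_path) + 5);
 */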
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
90 #include <linux/un.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
96 #include <linux/in.h>
97 #include <linux/fs.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
108 #include <net/scm.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/splice.h>
116 #include <linux/freezer.h>
117 #include <linux/file.h>
118 #include <linux/btf_ids.h>
119 
120 #include "scm.h"
121 
122 static atomic_long_t unix_nr_socks;
123 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
124 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
125 
126 /* SMP locking strategy:
127  *    the hash table is protected with per-bucket spinlocks.
128  *    each socket's state is protected by a separate spinlock.
129  */
130 
131 static unsigned int unix_unbound_hash(struct sock *sk)
132 {
133 	unsigned long hash = (unsigned long)sk;
134 
135 	hash ^= hash >> 16;
136 	hash ^= hash >> 8;
137 	hash ^= sk->sk_type;
138 
139 	return hash & UNIX_HASH_MOD;
140 }
141 
142 static unsigned int unix_bsd_hash(struct inode *i)
143 {
144 	return i->i_ino & UNIX_HASH_MOD;
145 }
146 
147 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
148 				       int addr_len, int type)
149 {
150 	__wsum csum = csum_partial(sunaddr, addr_len, 0);
151 	unsigned int hash;
152 
153 	hash = (__force unsigned int)csum_fold(csum);
154 	hash ^= hash >> 8;
155 	hash ^= type;
156 
157 	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
158 }
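
/* Editorial aside -- how the three hash functions above partition the
 * table (an inference from the constants, not text from this file).
 * With UNIX_HASH_MOD == UNIX_HASH_SIZE / 2 - 1, unbound sockets hash
 * into [0, UNIX_HASH_MOD], abstract sockets into [UNIX_HASH_MOD + 1,
 * UNIX_HASH_SIZE - 1], and pathname sockets live in the separate
 * bsd_socket_buckets[] keyed by inode number.  E.g. for UNIX_HASH_SIZE
 * of 256, an abstract SOCK_DGRAM (type 2) name whose folded checksum is
 * 0x42 lands in bucket 128 + (((0x42 ^ (0x42 >> 8)) ^ 2) & 127) == 192.
 */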
159 
160 static void unix_table_double_lock(struct net *net,
161 				   unsigned int hash1, unsigned int hash2)
162 {
163 	if (hash1 == hash2) {
164 		spin_lock(&net->unx.table.locks[hash1]);
165 		return;
166 	}
167 
168 	if (hash1 > hash2)
169 		swap(hash1, hash2);
170 
171 	spin_lock(&net->unx.table.locks[hash1]);
172 	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
173 }
174 
175 static void unix_table_double_unlock(struct net *net,
176 				     unsigned int hash1, unsigned int hash2)
177 {
178 	if (hash1 == hash2) {
179 		spin_unlock(&net->unx.table.locks[hash1]);
180 		return;
181 	}
182 
183 	spin_unlock(&net->unx.table.locks[hash1]);
184 	spin_unlock(&net->unx.table.locks[hash2]);
185 }
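
/* Editorial aside -- a worked illustration of the ordering rule above
 * (a sketch; the bucket numbers are made up).  Because the indices are
 * sorted before locking, two CPUs rehashing between the same pair of
 * buckets cannot deadlock:
 *
 *	CPU0: unix_table_double_lock(net, 5, 3);   // locks 3, then 5
 *	CPU1: unix_table_double_lock(net, 3, 5);   // also locks 3, then 5
 *
 * Without the swap(), CPU0 could hold 5 and wait on 3 while CPU1 holds
 * 3 and waits on 5 -- the classic AB-BA deadlock.
 */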
186 
187 #ifdef CONFIG_SECURITY_NETWORK
188 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
189 {
190 	UNIXCB(skb).secid = scm->secid;
191 }
192 
193 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
194 {
195 	scm->secid = UNIXCB(skb).secid;
196 }
197 
198 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
199 {
200 	return (scm->secid == UNIXCB(skb).secid);
201 }
202 #else
203 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
204 { }
205 
206 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
207 { }
208 
209 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
210 {
211 	return true;
212 }
213 #endif /* CONFIG_SECURITY_NETWORK */
214 
215 #define unix_peer(sk) (unix_sk(sk)->peer)
216 
217 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
218 {
219 	return unix_peer(osk) == sk;
220 }
221 
222 static inline int unix_may_send(struct sock *sk, struct sock *osk)
223 {
224 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
225 }
226 
227 static inline int unix_recvq_full(const struct sock *sk)
228 {
229 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
230 }
231 
232 static inline int unix_recvq_full_lockless(const struct sock *sk)
233 {
234 	return skb_queue_len_lockless(&sk->sk_receive_queue) >
235 		READ_ONCE(sk->sk_max_ack_backlog);
236 }
237 
238 struct sock *unix_peer_get(struct sock *s)
239 {
240 	struct sock *peer;
241 
242 	unix_state_lock(s);
243 	peer = unix_peer(s);
244 	if (peer)
245 		sock_hold(peer);
246 	unix_state_unlock(s);
247 	return peer;
248 }
249 EXPORT_SYMBOL_GPL(unix_peer_get);
250 
251 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
252 					     int addr_len)
253 {
254 	struct unix_address *addr;
255 
256 	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
257 	if (!addr)
258 		return NULL;
259 
260 	refcount_set(&addr->refcnt, 1);
261 	addr->len = addr_len;
262 	memcpy(addr->name, sunaddr, addr_len);
263 
264 	return addr;
265 }
266 
267 static inline void unix_release_addr(struct unix_address *addr)
268 {
269 	if (refcount_dec_and_test(&addr->refcnt))
270 		kfree(addr);
271 }
272 
273 /*
274  *	Check unix socket name:
275  *		- it must not be zero length.
276  *		- if it does not start with a zero byte, it must be a
277  *		  NUL-terminated path (an FS object); otherwise it is an abstract name.
278  */
279 
280 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
281 {
282 	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
283 	    addr_len > sizeof(*sunaddr))
284 		return -EINVAL;
285 
286 	if (sunaddr->sun_family != AF_UNIX)
287 		return -EINVAL;
288 
289 	return 0;
290 }
291 
292 static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
293 {
294 	/* This may look like an off-by-one error but it is a bit more
295 	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
296 	 * sun_path[108] doesn't exist as such.  However, in kernel space
297 	 * we are guaranteed that it is a valid memory location in our
298 	 * kernel address buffer because syscall functions always pass
299 	 * a pointer to a struct sockaddr_storage, which has a bigger
300 	 * buffer than 108 bytes.
301 	 */
302 	((char *)sunaddr)[addr_len] = 0;
303 }
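
/* Editorial aside -- a concrete reading of the comment above, assuming
 * the usual Linux layout.  sizeof(struct sockaddr_un) is 110: a 2-byte
 * sun_family plus a 108-byte sun_path.  A caller binding a full-length
 * path passes addr_len == 110, so the store above writes
 * ((char *)sunaddr)[110]; that byte exists because the socket syscalls
 * copy the user address into a 128-byte struct sockaddr_storage before
 * handing it down here.
 */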
304 
305 static void __unix_remove_socket(struct sock *sk)
306 {
307 	sk_del_node_init(sk);
308 }
309 
310 static void __unix_insert_socket(struct net *net, struct sock *sk)
311 {
312 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
313 	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
314 }
315 
316 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
317 				 struct unix_address *addr, unsigned int hash)
318 {
319 	__unix_remove_socket(sk);
320 	smp_store_release(&unix_sk(sk)->addr, addr);
321 
322 	sk->sk_hash = hash;
323 	__unix_insert_socket(net, sk);
324 }
325 
326 static void unix_remove_socket(struct net *net, struct sock *sk)
327 {
328 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
329 	__unix_remove_socket(sk);
330 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
331 }
332 
333 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
334 {
335 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
336 	__unix_insert_socket(net, sk);
337 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
338 }
339 
340 static void unix_insert_bsd_socket(struct sock *sk)
341 {
342 	spin_lock(&bsd_socket_locks[sk->sk_hash]);
343 	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
344 	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
345 }
346 
347 static void unix_remove_bsd_socket(struct sock *sk)
348 {
349 	if (!hlist_unhashed(&sk->sk_bind_node)) {
350 		spin_lock(&bsd_socket_locks[sk->sk_hash]);
351 		__sk_del_bind_node(sk);
352 		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
353 
354 		sk_node_init(&sk->sk_bind_node);
355 	}
356 }
357 
358 static struct sock *__unix_find_socket_byname(struct net *net,
359 					      struct sockaddr_un *sunname,
360 					      int len, unsigned int hash)
361 {
362 	struct sock *s;
363 
364 	sk_for_each(s, &net->unx.table.buckets[hash]) {
365 		struct unix_sock *u = unix_sk(s);
366 
367 		if (u->addr->len == len &&
368 		    !memcmp(u->addr->name, sunname, len))
369 			return s;
370 	}
371 	return NULL;
372 }
373 
374 static inline struct sock *unix_find_socket_byname(struct net *net,
375 						   struct sockaddr_un *sunname,
376 						   int len, unsigned int hash)
377 {
378 	struct sock *s;
379 
380 	spin_lock(&net->unx.table.locks[hash]);
381 	s = __unix_find_socket_byname(net, sunname, len, hash);
382 	if (s)
383 		sock_hold(s);
384 	spin_unlock(&net->unx.table.locks[hash]);
385 	return s;
386 }
387 
388 static struct sock *unix_find_socket_byinode(struct inode *i)
389 {
390 	unsigned int hash = unix_bsd_hash(i);
391 	struct sock *s;
392 
393 	spin_lock(&bsd_socket_locks[hash]);
394 	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
395 		struct dentry *dentry = unix_sk(s)->path.dentry;
396 
397 		if (dentry && d_backing_inode(dentry) == i) {
398 			sock_hold(s);
399 			spin_unlock(&bsd_socket_locks[hash]);
400 			return s;
401 		}
402 	}
403 	spin_unlock(&bsd_socket_locks[hash]);
404 	return NULL;
405 }
406 
407 /* Support code for asymmetrically connected dgram sockets
408  *
409  * If a datagram socket is connected to a socket not itself connected
410  * to the first socket (e.g., /dev/log), clients may only enqueue more
411  * messages if the present receive queue of the server socket is not
412  * "too large". This means there's a second writeability condition
413  * poll and sendmsg need to test. The dgram recv code does a wake
414  * up on the peer_wait wait queue of a socket upon reception of a
415  * datagram; this wake up needs to be propagated to sleeping would-be
416  * writers since these might not have sent anything so far. This can't
417  * be accomplished via poll_wait because the lifetime of the server
418  * socket might be less than that of its clients if these break their
419  * association with it or if the server socket is closed while clients
420  * are still connected to it, and there's no way to inform "a polling
421  * implementation" that it should let go of a certain wait queue.
422  *
423  * In order to propagate a wake up, a wait_queue_entry_t of the client
424  * socket is enqueued on the peer_wait queue of the server socket
425  * whose wake function does a wake_up on the ordinary client socket
426  * wait queue. This connection is established whenever a write (or
427  * poll for write) hits the flow control condition and is broken when
428  * the association to the server socket is dissolved or after a wake
429  * up was relayed.
430  */
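
/* Editorial aside -- a hedged userspace sketch of the situation handled
 * below (names are made up; standard API only).  "srv" is a bound dgram
 * socket that never connects back; "cli" connects to it:
 *
 *	int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	// bind(srv, ...), then:
 *	int cli = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	connect(cli, ...);                    // asymmetric association
 *
 *	struct pollfd pfd = { .fd = cli, .events = POLLOUT };
 *	poll(&pfd, 1, -1);
 *
 * If srv's receive queue is full, the poll above must sleep even though
 * cli's own sndbuf is empty; the peer_wake machinery below is what
 * wakes it once srv finally recvfrom()s a datagram.
 */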
431 
432 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
433 				      void *key)
434 {
435 	struct unix_sock *u;
436 	wait_queue_head_t *u_sleep;
437 
438 	u = container_of(q, struct unix_sock, peer_wake);
439 
440 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
441 			    q);
442 	u->peer_wake.private = NULL;
443 
444 	/* relaying can only happen while the wq still exists */
445 	u_sleep = sk_sleep(&u->sk);
446 	if (u_sleep)
447 		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
448 
449 	return 0;
450 }
451 
452 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
453 {
454 	struct unix_sock *u, *u_other;
455 	int rc;
456 
457 	u = unix_sk(sk);
458 	u_other = unix_sk(other);
459 	rc = 0;
460 	spin_lock(&u_other->peer_wait.lock);
461 
462 	if (!u->peer_wake.private) {
463 		u->peer_wake.private = other;
464 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
465 
466 		rc = 1;
467 	}
468 
469 	spin_unlock(&u_other->peer_wait.lock);
470 	return rc;
471 }
472 
473 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
474 					    struct sock *other)
475 {
476 	struct unix_sock *u, *u_other;
477 
478 	u = unix_sk(sk);
479 	u_other = unix_sk(other);
480 	spin_lock(&u_other->peer_wait.lock);
481 
482 	if (u->peer_wake.private == other) {
483 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
484 		u->peer_wake.private = NULL;
485 	}
486 
487 	spin_unlock(&u_other->peer_wait.lock);
488 }
489 
490 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
491 						   struct sock *other)
492 {
493 	unix_dgram_peer_wake_disconnect(sk, other);
494 	wake_up_interruptible_poll(sk_sleep(sk),
495 				   EPOLLOUT |
496 				   EPOLLWRNORM |
497 				   EPOLLWRBAND);
498 }
499 
500 /* preconditions:
501  *	- unix_peer(sk) == other
502  *	- association is stable
503  */
504 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
505 {
506 	int connected;
507 
508 	connected = unix_dgram_peer_wake_connect(sk, other);
509 
510 	/* If other is SOCK_DEAD, we want to make sure we signal
511 	 * POLLOUT, such that a subsequent write() can get a
512 	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
513 	 * to other and it's full, we will hang waiting for POLLOUT.
514 	 */
515 	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
516 		return 1;
517 
518 	if (connected)
519 		unix_dgram_peer_wake_disconnect(sk, other);
520 
521 	return 0;
522 }
523 
524 static int unix_writable(const struct sock *sk)
525 {
526 	return sk->sk_state != TCP_LISTEN &&
527 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
528 }
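
/* Editorial aside -- a worked reading of the test above (an inference,
 * not documented text): the << 2 makes a socket writable only while at
 * most a quarter of sk_sndbuf is in flight.  With the common default
 * sndbuf of 212992 bytes, writability is lost once
 * refcount_read(&sk->sk_wmem_alloc) exceeds 212992 / 4 == 53248.
 */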
529 
530 static void unix_write_space(struct sock *sk)
531 {
532 	struct socket_wq *wq;
533 
534 	rcu_read_lock();
535 	if (unix_writable(sk)) {
536 		wq = rcu_dereference(sk->sk_wq);
537 		if (skwq_has_sleeper(wq))
538 			wake_up_interruptible_sync_poll(&wq->wait,
539 				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
540 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
541 	}
542 	rcu_read_unlock();
543 }
544 
545 /* When a dgram socket disconnects (or changes its peer), we clear its receive
546  * queue of packets that arrived from the previous peer. First, this allows
547  * flow control based only on wmem_alloc; second, an sk connected to a peer
548  * may receive messages only from that peer. */
549 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
550 {
551 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
552 		skb_queue_purge(&sk->sk_receive_queue);
553 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
554 
555 		/* If one link of a bidirectional dgram pipe is disconnected,
556 		 * we signal an error. Messages are lost. Do not do this
557 		 * when the peer was not connected to us.
558 		 */
559 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
560 			WRITE_ONCE(other->sk_err, ECONNRESET);
561 			sk_error_report(other);
562 		}
563 	}
564 	other->sk_state = TCP_CLOSE;
565 }
566 
567 static void unix_sock_destructor(struct sock *sk)
568 {
569 	struct unix_sock *u = unix_sk(sk);
570 
571 	skb_queue_purge(&sk->sk_receive_queue);
572 
573 	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
574 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
575 	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
576 	if (!sock_flag(sk, SOCK_DEAD)) {
577 		pr_info("Attempt to release alive unix socket: %p\n", sk);
578 		return;
579 	}
580 
581 	if (u->addr)
582 		unix_release_addr(u->addr);
583 
584 	atomic_long_dec(&unix_nr_socks);
585 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
586 #ifdef UNIX_REFCNT_DEBUG
587 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
588 		atomic_long_read(&unix_nr_socks));
589 #endif
590 }
591 
592 static void unix_release_sock(struct sock *sk, int embrion)
593 {
594 	struct unix_sock *u = unix_sk(sk);
595 	struct sock *skpair;
596 	struct sk_buff *skb;
597 	struct path path;
598 	int state;
599 
600 	unix_remove_socket(sock_net(sk), sk);
601 	unix_remove_bsd_socket(sk);
602 
603 	/* Clear state */
604 	unix_state_lock(sk);
605 	sock_orphan(sk);
606 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
607 	path	     = u->path;
608 	u->path.dentry = NULL;
609 	u->path.mnt = NULL;
610 	state = sk->sk_state;
611 	sk->sk_state = TCP_CLOSE;
612 
613 	skpair = unix_peer(sk);
614 	unix_peer(sk) = NULL;
615 
616 	unix_state_unlock(sk);
617 
618 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
619 	if (u->oob_skb) {
620 		kfree_skb(u->oob_skb);
621 		u->oob_skb = NULL;
622 	}
623 #endif
624 
625 	wake_up_interruptible_all(&u->peer_wait);
626 
627 	if (skpair != NULL) {
628 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
629 			unix_state_lock(skpair);
630 			/* No more writes */
631 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
632 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
633 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
634 			unix_state_unlock(skpair);
635 			skpair->sk_state_change(skpair);
636 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
637 		}
638 
639 		unix_dgram_peer_wake_disconnect(sk, skpair);
640 		sock_put(skpair); /* It may now die */
641 	}
642 
643 	/* Try to flush out this socket. Throw out buffers at least */
644 
645 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
646 		if (state == TCP_LISTEN)
647 			unix_release_sock(skb->sk, 1);
648 		/* passed fds are erased in the kfree_skb hook	      */
649 		UNIXCB(skb).consumed = skb->len;
650 		kfree_skb(skb);
651 	}
652 
653 	if (path.dentry)
654 		path_put(&path);
655 
656 	sock_put(sk);
657 
658 	/* ---- Socket is dead now and most probably destroyed ---- */
659 
660 	/*
661 	 * Fixme: BSD difference: In BSD all sockets connected to us get
662 	 *	  ECONNRESET and we die on the spot. In Linux we behave
663 	 *	  like files and pipes do and wait for the last
664 	 *	  dereference.
665 	 *
666 	 * Can't we simply set sock->err?
667 	 *
668 	 *	  What does the above comment talk about? --ANK(980817)
669 	 */
670 
671 	if (unix_tot_inflight)
672 		unix_gc();		/* Garbage collect fds */
673 }
674 
675 static void init_peercred(struct sock *sk)
676 {
677 	const struct cred *old_cred;
678 	struct pid *old_pid;
679 
680 	spin_lock(&sk->sk_peer_lock);
681 	old_pid = sk->sk_peer_pid;
682 	old_cred = sk->sk_peer_cred;
683 	sk->sk_peer_pid  = get_pid(task_tgid(current));
684 	sk->sk_peer_cred = get_current_cred();
685 	spin_unlock(&sk->sk_peer_lock);
686 
687 	put_pid(old_pid);
688 	put_cred(old_cred);
689 }
690 
691 static void copy_peercred(struct sock *sk, struct sock *peersk)
692 {
693 	const struct cred *old_cred;
694 	struct pid *old_pid;
695 
696 	if (sk < peersk) {
697 		spin_lock(&sk->sk_peer_lock);
698 		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
699 	} else {
700 		spin_lock(&peersk->sk_peer_lock);
701 		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
702 	}
703 	old_pid = sk->sk_peer_pid;
704 	old_cred = sk->sk_peer_cred;
705 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
706 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
707 
708 	spin_unlock(&sk->sk_peer_lock);
709 	spin_unlock(&peersk->sk_peer_lock);
710 
711 	put_pid(old_pid);
712 	put_cred(old_cred);
713 }
714 
715 static int unix_listen(struct socket *sock, int backlog)
716 {
717 	int err;
718 	struct sock *sk = sock->sk;
719 	struct unix_sock *u = unix_sk(sk);
720 
721 	err = -EOPNOTSUPP;
722 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
723 		goto out;	/* Only stream/seqpacket sockets accept */
724 	err = -EINVAL;
725 	if (!u->addr)
726 		goto out;	/* No listens on an unbound socket */
727 	unix_state_lock(sk);
728 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
729 		goto out_unlock;
730 	if (backlog > sk->sk_max_ack_backlog)
731 		wake_up_interruptible_all(&u->peer_wait);
732 	sk->sk_max_ack_backlog	= backlog;
733 	sk->sk_state		= TCP_LISTEN;
734 	/* set credentials so connect can copy them */
735 	init_peercred(sk);
736 	err = 0;
737 
738 out_unlock:
739 	unix_state_unlock(sk);
740 out:
741 	return err;
742 }
743 
744 static int unix_release(struct socket *);
745 static int unix_bind(struct socket *, struct sockaddr *, int);
746 static int unix_stream_connect(struct socket *, struct sockaddr *,
747 			       int addr_len, int flags);
748 static int unix_socketpair(struct socket *, struct socket *);
749 static int unix_accept(struct socket *, struct socket *, int, bool);
750 static int unix_getname(struct socket *, struct sockaddr *, int);
751 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
752 static __poll_t unix_dgram_poll(struct file *, struct socket *,
753 				    poll_table *);
754 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
755 #ifdef CONFIG_COMPAT
756 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
757 #endif
758 static int unix_shutdown(struct socket *, int);
759 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
760 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
761 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
762 				    size_t size, int flags);
763 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
764 				       struct pipe_inode_info *, size_t size,
765 				       unsigned int flags);
766 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
767 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
768 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
769 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
770 static int unix_dgram_connect(struct socket *, struct sockaddr *,
771 			      int, int);
772 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
773 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
774 				  int);
775 
776 static int unix_set_peek_off(struct sock *sk, int val)
777 {
778 	struct unix_sock *u = unix_sk(sk);
779 
780 	if (mutex_lock_interruptible(&u->iolock))
781 		return -EINTR;
782 
783 	sk->sk_peek_off = val;
784 	mutex_unlock(&u->iolock);
785 
786 	return 0;
787 }
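
/* Editorial aside -- the userspace face of the knob above (a hedged
 * sketch; standard sockets API only).  With SO_PEEK_OFF set, successive
 * MSG_PEEK reads walk forward through the queued bytes instead of
 * re-reading from the start:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);   // peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);   // peeks bytes 16..31; data stays queued
 */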
788 
789 #ifdef CONFIG_PROC_FS
790 static int unix_count_nr_fds(struct sock *sk)
791 {
792 	struct sk_buff *skb;
793 	struct unix_sock *u;
794 	int nr_fds = 0;
795 
796 	spin_lock(&sk->sk_receive_queue.lock);
797 	skb = skb_peek(&sk->sk_receive_queue);
798 	while (skb) {
799 		u = unix_sk(skb->sk);
800 		nr_fds += atomic_read(&u->scm_stat.nr_fds);
801 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
802 	}
803 	spin_unlock(&sk->sk_receive_queue.lock);
804 
805 	return nr_fds;
806 }
807 
808 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
809 {
810 	struct sock *sk = sock->sk;
811 	unsigned char s_state;
812 	struct unix_sock *u;
813 	int nr_fds = 0;
814 
815 	if (sk) {
816 		s_state = READ_ONCE(sk->sk_state);
817 		u = unix_sk(sk);
818 
819 		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
820 		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
821 		 * SOCK_DGRAM is handled the ordinary way. So, no lock is needed.
822 		 */
823 		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
824 			nr_fds = atomic_read(&u->scm_stat.nr_fds);
825 		else if (s_state == TCP_LISTEN)
826 			nr_fds = unix_count_nr_fds(sk);
827 
828 		seq_printf(m, "scm_fds: %u\n", nr_fds);
829 	}
830 }
831 #else
832 #define unix_show_fdinfo NULL
833 #endif
834 
835 static const struct proto_ops unix_stream_ops = {
836 	.family =	PF_UNIX,
837 	.owner =	THIS_MODULE,
838 	.release =	unix_release,
839 	.bind =		unix_bind,
840 	.connect =	unix_stream_connect,
841 	.socketpair =	unix_socketpair,
842 	.accept =	unix_accept,
843 	.getname =	unix_getname,
844 	.poll =		unix_poll,
845 	.ioctl =	unix_ioctl,
846 #ifdef CONFIG_COMPAT
847 	.compat_ioctl =	unix_compat_ioctl,
848 #endif
849 	.listen =	unix_listen,
850 	.shutdown =	unix_shutdown,
851 	.sendmsg =	unix_stream_sendmsg,
852 	.recvmsg =	unix_stream_recvmsg,
853 	.read_skb =	unix_stream_read_skb,
854 	.mmap =		sock_no_mmap,
855 	.sendpage =	unix_stream_sendpage,
856 	.splice_read =	unix_stream_splice_read,
857 	.set_peek_off =	unix_set_peek_off,
858 	.show_fdinfo =	unix_show_fdinfo,
859 };
860 
861 static const struct proto_ops unix_dgram_ops = {
862 	.family =	PF_UNIX,
863 	.owner =	THIS_MODULE,
864 	.release =	unix_release,
865 	.bind =		unix_bind,
866 	.connect =	unix_dgram_connect,
867 	.socketpair =	unix_socketpair,
868 	.accept =	sock_no_accept,
869 	.getname =	unix_getname,
870 	.poll =		unix_dgram_poll,
871 	.ioctl =	unix_ioctl,
872 #ifdef CONFIG_COMPAT
873 	.compat_ioctl =	unix_compat_ioctl,
874 #endif
875 	.listen =	sock_no_listen,
876 	.shutdown =	unix_shutdown,
877 	.sendmsg =	unix_dgram_sendmsg,
878 	.read_skb =	unix_read_skb,
879 	.recvmsg =	unix_dgram_recvmsg,
880 	.mmap =		sock_no_mmap,
881 	.sendpage =	sock_no_sendpage,
882 	.set_peek_off =	unix_set_peek_off,
883 	.show_fdinfo =	unix_show_fdinfo,
884 };
885 
886 static const struct proto_ops unix_seqpacket_ops = {
887 	.family =	PF_UNIX,
888 	.owner =	THIS_MODULE,
889 	.release =	unix_release,
890 	.bind =		unix_bind,
891 	.connect =	unix_stream_connect,
892 	.socketpair =	unix_socketpair,
893 	.accept =	unix_accept,
894 	.getname =	unix_getname,
895 	.poll =		unix_dgram_poll,
896 	.ioctl =	unix_ioctl,
897 #ifdef CONFIG_COMPAT
898 	.compat_ioctl =	unix_compat_ioctl,
899 #endif
900 	.listen =	unix_listen,
901 	.shutdown =	unix_shutdown,
902 	.sendmsg =	unix_seqpacket_sendmsg,
903 	.recvmsg =	unix_seqpacket_recvmsg,
904 	.mmap =		sock_no_mmap,
905 	.sendpage =	sock_no_sendpage,
906 	.set_peek_off =	unix_set_peek_off,
907 	.show_fdinfo =	unix_show_fdinfo,
908 };
909 
910 static void unix_close(struct sock *sk, long timeout)
911 {
912 	/* Nothing to do here, unix socket does not need a ->close().
913 	 * This is merely for sockmap.
914 	 */
915 }
916 
917 static void unix_unhash(struct sock *sk)
918 {
919 	/* Nothing to do here, unix socket does not need a ->unhash().
920 	 * This is merely for sockmap.
921 	 */
922 }
923 
924 struct proto unix_dgram_proto = {
925 	.name			= "UNIX",
926 	.owner			= THIS_MODULE,
927 	.obj_size		= sizeof(struct unix_sock),
928 	.close			= unix_close,
929 #ifdef CONFIG_BPF_SYSCALL
930 	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
931 #endif
932 };
933 
934 struct proto unix_stream_proto = {
935 	.name			= "UNIX-STREAM",
936 	.owner			= THIS_MODULE,
937 	.obj_size		= sizeof(struct unix_sock),
938 	.close			= unix_close,
939 	.unhash			= unix_unhash,
940 #ifdef CONFIG_BPF_SYSCALL
941 	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
942 #endif
943 };
944 
945 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
946 {
947 	struct unix_sock *u;
948 	struct sock *sk;
949 	int err;
950 
951 	atomic_long_inc(&unix_nr_socks);
952 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
953 		err = -ENFILE;
954 		goto err;
955 	}
956 
957 	if (type == SOCK_STREAM)
958 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
959 	else /* dgram and seqpacket */
960 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
961 
962 	if (!sk) {
963 		err = -ENOMEM;
964 		goto err;
965 	}
966 
967 	sock_init_data(sock, sk);
968 
969 	sk->sk_hash		= unix_unbound_hash(sk);
970 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
971 	sk->sk_write_space	= unix_write_space;
972 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
973 	sk->sk_destruct		= unix_sock_destructor;
974 	u	  = unix_sk(sk);
975 	u->path.dentry = NULL;
976 	u->path.mnt = NULL;
977 	spin_lock_init(&u->lock);
978 	atomic_long_set(&u->inflight, 0);
979 	INIT_LIST_HEAD(&u->link);
980 	mutex_init(&u->iolock); /* single task reading lock */
981 	mutex_init(&u->bindlock); /* single task binding lock */
982 	init_waitqueue_head(&u->peer_wait);
983 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
984 	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
985 	unix_insert_unbound_socket(net, sk);
986 
987 	sock_prot_inuse_add(net, sk->sk_prot, 1);
988 
989 	return sk;
990 
991 err:
992 	atomic_long_dec(&unix_nr_socks);
993 	return ERR_PTR(err);
994 }
995 
996 static int unix_create(struct net *net, struct socket *sock, int protocol,
997 		       int kern)
998 {
999 	struct sock *sk;
1000 
1001 	if (protocol && protocol != PF_UNIX)
1002 		return -EPROTONOSUPPORT;
1003 
1004 	sock->state = SS_UNCONNECTED;
1005 
1006 	switch (sock->type) {
1007 	case SOCK_STREAM:
1008 		sock->ops = &unix_stream_ops;
1009 		break;
1010 		/*
1011 		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW, though
1012 		 *	nothing uses it.
1013 		 */
1014 	case SOCK_RAW:
1015 		sock->type = SOCK_DGRAM;
1016 		fallthrough;
1017 	case SOCK_DGRAM:
1018 		sock->ops = &unix_dgram_ops;
1019 		break;
1020 	case SOCK_SEQPACKET:
1021 		sock->ops = &unix_seqpacket_ops;
1022 		break;
1023 	default:
1024 		return -ESOCKTNOSUPPORT;
1025 	}
1026 
1027 	sk = unix_create1(net, sock, kern, sock->type);
1028 	if (IS_ERR(sk))
1029 		return PTR_ERR(sk);
1030 
1031 	return 0;
1032 }
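
/* Editorial aside -- a sketch of the SOCK_RAW quirk handled above
 * (standard userspace API assumed).  Both calls yield an identical
 * datagram socket; the second is silently retyped by unix_create():
 *
 *	int a = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	int b = socket(AF_UNIX, SOCK_RAW, 0);    // becomes SOCK_DGRAM
 */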
1033 
1034 static int unix_release(struct socket *sock)
1035 {
1036 	struct sock *sk = sock->sk;
1037 
1038 	if (!sk)
1039 		return 0;
1040 
1041 	sk->sk_prot->close(sk, 0);
1042 	unix_release_sock(sk, 0);
1043 	sock->sk = NULL;
1044 
1045 	return 0;
1046 }
1047 
1048 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1049 				  int type)
1050 {
1051 	struct inode *inode;
1052 	struct path path;
1053 	struct sock *sk;
1054 	int err;
1055 
1056 	unix_mkname_bsd(sunaddr, addr_len);
1057 	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1058 	if (err)
1059 		goto fail;
1060 
1061 	err = path_permission(&path, MAY_WRITE);
1062 	if (err)
1063 		goto path_put;
1064 
1065 	err = -ECONNREFUSED;
1066 	inode = d_backing_inode(path.dentry);
1067 	if (!S_ISSOCK(inode->i_mode))
1068 		goto path_put;
1069 
1070 	sk = unix_find_socket_byinode(inode);
1071 	if (!sk)
1072 		goto path_put;
1073 
1074 	err = -EPROTOTYPE;
1075 	if (sk->sk_type == type)
1076 		touch_atime(&path);
1077 	else
1078 		goto sock_put;
1079 
1080 	path_put(&path);
1081 
1082 	return sk;
1083 
1084 sock_put:
1085 	sock_put(sk);
1086 path_put:
1087 	path_put(&path);
1088 fail:
1089 	return ERR_PTR(err);
1090 }
1091 
1092 static struct sock *unix_find_abstract(struct net *net,
1093 				       struct sockaddr_un *sunaddr,
1094 				       int addr_len, int type)
1095 {
1096 	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1097 	struct dentry *dentry;
1098 	struct sock *sk;
1099 
1100 	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1101 	if (!sk)
1102 		return ERR_PTR(-ECONNREFUSED);
1103 
1104 	dentry = unix_sk(sk)->path.dentry;
1105 	if (dentry)
1106 		touch_atime(&unix_sk(sk)->path);
1107 
1108 	return sk;
1109 }
1110 
1111 static struct sock *unix_find_other(struct net *net,
1112 				    struct sockaddr_un *sunaddr,
1113 				    int addr_len, int type)
1114 {
1115 	struct sock *sk;
1116 
1117 	if (sunaddr->sun_path[0])
1118 		sk = unix_find_bsd(sunaddr, addr_len, type);
1119 	else
1120 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1121 
1122 	return sk;
1123 }
1124 
1125 static int unix_autobind(struct sock *sk)
1126 {
1127 	unsigned int new_hash, old_hash = sk->sk_hash;
1128 	struct unix_sock *u = unix_sk(sk);
1129 	struct net *net = sock_net(sk);
1130 	struct unix_address *addr;
1131 	u32 lastnum, ordernum;
1132 	int err;
1133 
1134 	err = mutex_lock_interruptible(&u->bindlock);
1135 	if (err)
1136 		return err;
1137 
1138 	if (u->addr)
1139 		goto out;
1140 
1141 	err = -ENOMEM;
1142 	addr = kzalloc(sizeof(*addr) +
1143 		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1144 	if (!addr)
1145 		goto out;
1146 
1147 	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1148 	addr->name->sun_family = AF_UNIX;
1149 	refcount_set(&addr->refcnt, 1);
1150 
1151 	ordernum = get_random_u32();
1152 	lastnum = ordernum & 0xFFFFF;
1153 retry:
1154 	ordernum = (ordernum + 1) & 0xFFFFF;
1155 	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1156 
1157 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1158 	unix_table_double_lock(net, old_hash, new_hash);
1159 
1160 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1161 		unix_table_double_unlock(net, old_hash, new_hash);
1162 
1163 		/* __unix_find_socket_byname() may take a long time if many names
1164 		 * are already in use.
1165 		 */
1166 		cond_resched();
1167 
1168 		if (ordernum == lastnum) {
1169 			/* Give up if all names seem to be in use. */
1170 			err = -ENOSPC;
1171 			unix_release_addr(addr);
1172 			goto out;
1173 		}
1174 
1175 		goto retry;
1176 	}
1177 
1178 	__unix_set_addr_hash(net, sk, addr, new_hash);
1179 	unix_table_double_unlock(net, old_hash, new_hash);
1180 	err = 0;
1181 
1182 out:	mutex_unlock(&u->bindlock);
1183 	return err;
1184 }
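
/* Editorial aside -- triggering the autobind above from userspace (a
 * hedged sketch; "fd" is a hypothetical unbound AF_UNIX dgram socket).
 * Binding with only the family present assigns an abstract name of a
 * zero byte followed by five lowercase hex digits:
 *
 *	struct sockaddr_un sa = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(sa);
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa_family_t));
 *	getsockname(fd, (struct sockaddr *)&sa, &len);
 *	// len == offsetof(struct sockaddr_un, sun_path) + 6 and
 *	// sa.sun_path holds e.g. "\0" "a3f09"
 */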
1185 
1186 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1187 			 int addr_len)
1188 {
1189 	umode_t mode = S_IFSOCK |
1190 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1191 	unsigned int new_hash, old_hash = sk->sk_hash;
1192 	struct unix_sock *u = unix_sk(sk);
1193 	struct net *net = sock_net(sk);
1194 	struct mnt_idmap *idmap;
1195 	struct unix_address *addr;
1196 	struct dentry *dentry;
1197 	struct path parent;
1198 	int err;
1199 
1200 	unix_mkname_bsd(sunaddr, addr_len);
1201 	addr_len = strlen(sunaddr->sun_path) +
1202 		offsetof(struct sockaddr_un, sun_path) + 1;
1203 
1204 	addr = unix_create_addr(sunaddr, addr_len);
1205 	if (!addr)
1206 		return -ENOMEM;
1207 
1208 	/*
1209 	 * Get the parent directory, calculate the hash for the last
1210 	 * component.
1211 	 */
1212 	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1213 	if (IS_ERR(dentry)) {
1214 		err = PTR_ERR(dentry);
1215 		goto out;
1216 	}
1217 
1218 	/*
1219 	 * All right, let's create it.
1220 	 */
1221 	idmap = mnt_idmap(parent.mnt);
1222 	err = security_path_mknod(&parent, dentry, mode, 0);
1223 	if (!err)
1224 		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1225 	if (err)
1226 		goto out_path;
1227 	err = mutex_lock_interruptible(&u->bindlock);
1228 	if (err)
1229 		goto out_unlink;
1230 	if (u->addr)
1231 		goto out_unlock;
1232 
1233 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1234 	unix_table_double_lock(net, old_hash, new_hash);
1235 	u->path.mnt = mntget(parent.mnt);
1236 	u->path.dentry = dget(dentry);
1237 	__unix_set_addr_hash(net, sk, addr, new_hash);
1238 	unix_table_double_unlock(net, old_hash, new_hash);
1239 	unix_insert_bsd_socket(sk);
1240 	mutex_unlock(&u->bindlock);
1241 	done_path_create(&parent, dentry);
1242 	return 0;
1243 
1244 out_unlock:
1245 	mutex_unlock(&u->bindlock);
1246 	err = -EINVAL;
1247 out_unlink:
1248 	/* failed after successful mknod?  unlink what we'd created... */
1249 	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1250 out_path:
1251 	done_path_create(&parent, dentry);
1252 out:
1253 	unix_release_addr(addr);
1254 	return err == -EEXIST ? -EADDRINUSE : err;
1255 }
1256 
1257 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1258 			      int addr_len)
1259 {
1260 	unsigned int new_hash, old_hash = sk->sk_hash;
1261 	struct unix_sock *u = unix_sk(sk);
1262 	struct net *net = sock_net(sk);
1263 	struct unix_address *addr;
1264 	int err;
1265 
1266 	addr = unix_create_addr(sunaddr, addr_len);
1267 	if (!addr)
1268 		return -ENOMEM;
1269 
1270 	err = mutex_lock_interruptible(&u->bindlock);
1271 	if (err)
1272 		goto out;
1273 
1274 	if (u->addr) {
1275 		err = -EINVAL;
1276 		goto out_mutex;
1277 	}
1278 
1279 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1280 	unix_table_double_lock(net, old_hash, new_hash);
1281 
1282 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1283 		goto out_spin;
1284 
1285 	__unix_set_addr_hash(net, sk, addr, new_hash);
1286 	unix_table_double_unlock(net, old_hash, new_hash);
1287 	mutex_unlock(&u->bindlock);
1288 	return 0;
1289 
1290 out_spin:
1291 	unix_table_double_unlock(net, old_hash, new_hash);
1292 	err = -EADDRINUSE;
1293 out_mutex:
1294 	mutex_unlock(&u->bindlock);
1295 out:
1296 	unix_release_addr(addr);
1297 	return err;
1298 }
1299 
1300 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1301 {
1302 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1303 	struct sock *sk = sock->sk;
1304 	int err;
1305 
1306 	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1307 	    sunaddr->sun_family == AF_UNIX)
1308 		return unix_autobind(sk);
1309 
1310 	err = unix_validate_addr(sunaddr, addr_len);
1311 	if (err)
1312 		return err;
1313 
1314 	if (sunaddr->sun_path[0])
1315 		err = unix_bind_bsd(sk, sunaddr, addr_len);
1316 	else
1317 		err = unix_bind_abstract(sk, sunaddr, addr_len);
1318 
1319 	return err;
1320 }
1321 
1322 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1323 {
1324 	if (unlikely(sk1 == sk2) || !sk2) {
1325 		unix_state_lock(sk1);
1326 		return;
1327 	}
1328 	if (sk1 < sk2) {
1329 		unix_state_lock(sk1);
1330 		unix_state_lock_nested(sk2);
1331 	} else {
1332 		unix_state_lock(sk2);
1333 		unix_state_lock_nested(sk1);
1334 	}
1335 }
1336 
1337 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1338 {
1339 	if (unlikely(sk1 == sk2) || !sk2) {
1340 		unix_state_unlock(sk1);
1341 		return;
1342 	}
1343 	unix_state_unlock(sk1);
1344 	unix_state_unlock(sk2);
1345 }
1346 
1347 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1348 			      int alen, int flags)
1349 {
1350 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1351 	struct sock *sk = sock->sk;
1352 	struct sock *other;
1353 	int err;
1354 
1355 	err = -EINVAL;
1356 	if (alen < offsetofend(struct sockaddr, sa_family))
1357 		goto out;
1358 
1359 	if (addr->sa_family != AF_UNSPEC) {
1360 		err = unix_validate_addr(sunaddr, alen);
1361 		if (err)
1362 			goto out;
1363 
1364 		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1365 		    !unix_sk(sk)->addr) {
1366 			err = unix_autobind(sk);
1367 			if (err)
1368 				goto out;
1369 		}
1370 
1371 restart:
1372 		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1373 		if (IS_ERR(other)) {
1374 			err = PTR_ERR(other);
1375 			goto out;
1376 		}
1377 
1378 		unix_state_double_lock(sk, other);
1379 
1380 		/* Apparently VFS overslept socket death. Retry. */
1381 		if (sock_flag(other, SOCK_DEAD)) {
1382 			unix_state_double_unlock(sk, other);
1383 			sock_put(other);
1384 			goto restart;
1385 		}
1386 
1387 		err = -EPERM;
1388 		if (!unix_may_send(sk, other))
1389 			goto out_unlock;
1390 
1391 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1392 		if (err)
1393 			goto out_unlock;
1394 
1395 		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1396 	} else {
1397 		/*
1398 		 *	1003.1g breaking connected state with AF_UNSPEC
1399 		 */
1400 		other = NULL;
1401 		unix_state_double_lock(sk, other);
1402 	}
1403 
1404 	/*
1405 	 * If it was connected, reconnect.
1406 	 */
1407 	if (unix_peer(sk)) {
1408 		struct sock *old_peer = unix_peer(sk);
1409 
1410 		unix_peer(sk) = other;
1411 		if (!other)
1412 			sk->sk_state = TCP_CLOSE;
1413 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1414 
1415 		unix_state_double_unlock(sk, other);
1416 
1417 		if (other != old_peer)
1418 			unix_dgram_disconnected(sk, old_peer);
1419 		sock_put(old_peer);
1420 	} else {
1421 		unix_peer(sk) = other;
1422 		unix_state_double_unlock(sk, other);
1423 	}
1424 
1425 	return 0;
1426 
1427 out_unlock:
1428 	unix_state_double_unlock(sk, other);
1429 	sock_put(other);
1430 out:
1431 	return err;
1432 }
1433 
1434 static long unix_wait_for_peer(struct sock *other, long timeo)
1435 	__releases(&unix_sk(other)->lock)
1436 {
1437 	struct unix_sock *u = unix_sk(other);
1438 	int sched;
1439 	DEFINE_WAIT(wait);
1440 
1441 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1442 
1443 	sched = !sock_flag(other, SOCK_DEAD) &&
1444 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1445 		unix_recvq_full_lockless(other);
1446 
1447 	unix_state_unlock(other);
1448 
1449 	if (sched)
1450 		timeo = schedule_timeout(timeo);
1451 
1452 	finish_wait(&u->peer_wait, &wait);
1453 	return timeo;
1454 }
1455 
1456 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1457 			       int addr_len, int flags)
1458 {
1459 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1460 	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1461 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1462 	struct net *net = sock_net(sk);
1463 	struct sk_buff *skb = NULL;
1464 	long timeo;
1465 	int err;
1466 	int st;
1467 
1468 	err = unix_validate_addr(sunaddr, addr_len);
1469 	if (err)
1470 		goto out;
1471 
1472 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1473 		err = unix_autobind(sk);
1474 		if (err)
1475 			goto out;
1476 	}
1477 
1478 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1479 
1480 	/* First of all allocate resources.
1481 	   If we do it after the state is locked,
1482 	   we will have to recheck everything again in any case.
1483 	 */
1484 
1485 	/* create new sock for complete connection */
1486 	newsk = unix_create1(net, NULL, 0, sock->type);
1487 	if (IS_ERR(newsk)) {
1488 		err = PTR_ERR(newsk);
1489 		newsk = NULL;
1490 		goto out;
1491 	}
1492 
1493 	err = -ENOMEM;
1494 
1495 	/* Allocate skb for sending to listening sock */
1496 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1497 	if (skb == NULL)
1498 		goto out;
1499 
1500 restart:
1501 	/*  Find listening sock. */
1502 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1503 	if (IS_ERR(other)) {
1504 		err = PTR_ERR(other);
1505 		other = NULL;
1506 		goto out;
1507 	}
1508 
1509 	/* Latch state of peer */
1510 	unix_state_lock(other);
1511 
1512 	/* Apparently VFS overslept socket death. Retry. */
1513 	if (sock_flag(other, SOCK_DEAD)) {
1514 		unix_state_unlock(other);
1515 		sock_put(other);
1516 		goto restart;
1517 	}
1518 
1519 	err = -ECONNREFUSED;
1520 	if (other->sk_state != TCP_LISTEN)
1521 		goto out_unlock;
1522 	if (other->sk_shutdown & RCV_SHUTDOWN)
1523 		goto out_unlock;
1524 
1525 	if (unix_recvq_full(other)) {
1526 		err = -EAGAIN;
1527 		if (!timeo)
1528 			goto out_unlock;
1529 
1530 		timeo = unix_wait_for_peer(other, timeo);
1531 
1532 		err = sock_intr_errno(timeo);
1533 		if (signal_pending(current))
1534 			goto out;
1535 		sock_put(other);
1536 		goto restart;
1537 	}
1538 
1539 	/* Latch our state.
1540 
1541 	   This is a tricky place. We need to grab our state lock and cannot
1542 	   drop the lock on the peer. It is dangerous because deadlock is
1543 	   possible. The connect-to-self case and simultaneous
1544 	   attempts to connect are eliminated by checking the socket
1545 	   state. other is TCP_LISTEN; if sk is TCP_LISTEN, we
1546 	   check this before attempting to grab the lock.
1547 
1548 	   Well, and we have to recheck the state after the socket is locked.
1549 	 */
1550 	st = sk->sk_state;
1551 
1552 	switch (st) {
1553 	case TCP_CLOSE:
1554 		/* This is ok... continue with connect */
1555 		break;
1556 	case TCP_ESTABLISHED:
1557 		/* Socket is already connected */
1558 		err = -EISCONN;
1559 		goto out_unlock;
1560 	default:
1561 		err = -EINVAL;
1562 		goto out_unlock;
1563 	}
1564 
1565 	unix_state_lock_nested(sk);
1566 
1567 	if (sk->sk_state != st) {
1568 		unix_state_unlock(sk);
1569 		unix_state_unlock(other);
1570 		sock_put(other);
1571 		goto restart;
1572 	}
1573 
1574 	err = security_unix_stream_connect(sk, other, newsk);
1575 	if (err) {
1576 		unix_state_unlock(sk);
1577 		goto out_unlock;
1578 	}
1579 
1580 	/* The way is open! Quickly set all the necessary fields... */
1581 
1582 	sock_hold(sk);
1583 	unix_peer(newsk)	= sk;
1584 	newsk->sk_state		= TCP_ESTABLISHED;
1585 	newsk->sk_type		= sk->sk_type;
1586 	init_peercred(newsk);
1587 	newu = unix_sk(newsk);
1588 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1589 	otheru = unix_sk(other);
1590 
1591 	/* copy address information from listening to new sock
1592 	 *
1593 	 * The contents of *(otheru->addr) and otheru->path
1594 	 * are seen fully set up here, since we have found
1595 	 * otheru in hash under its lock.  Insertion into the
1596 	 * hash chain we'd found it in had been done in an
1597 	 * earlier critical area protected by the chain's lock,
1598 	 * the same one where we'd set *(otheru->addr) contents,
1599 	 * as well as otheru->path and otheru->addr itself.
1600 	 *
1601 	 * Using smp_store_release() here to set newu->addr
1602 	 * is enough to make those stores, as well as stores
1603 	 * to newu->path visible to anyone who gets newu->addr
1604 	 * by smp_load_acquire().  IOW, the same guarantees
1605 	 * as for unix_sock instances bound in unix_bind() or
1606 	 * in unix_autobind().
1607 	 */
1608 	if (otheru->path.dentry) {
1609 		path_get(&otheru->path);
1610 		newu->path = otheru->path;
1611 	}
1612 	refcount_inc(&otheru->addr->refcnt);
1613 	smp_store_release(&newu->addr, otheru->addr);
1614 
1615 	/* Set credentials */
1616 	copy_peercred(sk, other);
1617 
1618 	sock->state	= SS_CONNECTED;
1619 	sk->sk_state	= TCP_ESTABLISHED;
1620 	sock_hold(newsk);
1621 
1622 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1623 	unix_peer(sk)	= newsk;
1624 
1625 	unix_state_unlock(sk);
1626 
1627 	/* queue the skb and send info to listening sock */
1628 	spin_lock(&other->sk_receive_queue.lock);
1629 	__skb_queue_tail(&other->sk_receive_queue, skb);
1630 	spin_unlock(&other->sk_receive_queue.lock);
1631 	unix_state_unlock(other);
1632 	other->sk_data_ready(other);
1633 	sock_put(other);
1634 	return 0;
1635 
1636 out_unlock:
1637 	if (other)
1638 		unix_state_unlock(other);
1639 
1640 out:
1641 	kfree_skb(skb);
1642 	if (newsk)
1643 		unix_release_sock(newsk, 0);
1644 	if (other)
1645 		sock_put(other);
1646 	return err;
1647 }
1648 
1649 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1650 {
1651 	struct sock *ska = socka->sk, *skb = sockb->sk;
1652 
1653 	/* Join our sockets back to back */
1654 	sock_hold(ska);
1655 	sock_hold(skb);
1656 	unix_peer(ska) = skb;
1657 	unix_peer(skb) = ska;
1658 	init_peercred(ska);
1659 	init_peercred(skb);
1660 
1661 	ska->sk_state = TCP_ESTABLISHED;
1662 	skb->sk_state = TCP_ESTABLISHED;
1663 	socka->state  = SS_CONNECTED;
1664 	sockb->state  = SS_CONNECTED;
1665 	return 0;
1666 }
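
/* Editorial aside -- the userspace view of the splice above (a sketch;
 * standard API only).  socketpair() hands back two already-connected
 * peers, so no bind/listen/accept round trip is needed:
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	write(sv[0], "ping", 4);
 *	read(sv[1], buf, 4);           // reads "ping"
 */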
1667 
1668 static void unix_sock_inherit_flags(const struct socket *old,
1669 				    struct socket *new)
1670 {
1671 	if (test_bit(SOCK_PASSCRED, &old->flags))
1672 		set_bit(SOCK_PASSCRED, &new->flags);
1673 	if (test_bit(SOCK_PASSSEC, &old->flags))
1674 		set_bit(SOCK_PASSSEC, &new->flags);
1675 }
1676 
1677 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1678 		       bool kern)
1679 {
1680 	struct sock *sk = sock->sk;
1681 	struct sock *tsk;
1682 	struct sk_buff *skb;
1683 	int err;
1684 
1685 	err = -EOPNOTSUPP;
1686 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1687 		goto out;
1688 
1689 	err = -EINVAL;
1690 	if (sk->sk_state != TCP_LISTEN)
1691 		goto out;
1692 
1693 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1694 	 * so no locks are necessary.
1695 	 */
1696 
1697 	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1698 				&err);
1699 	if (!skb) {
1700 		/* This means receive shutdown. */
1701 		if (err == 0)
1702 			err = -EINVAL;
1703 		goto out;
1704 	}
1705 
1706 	tsk = skb->sk;
1707 	skb_free_datagram(sk, skb);
1708 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1709 
1710 	/* attach accepted sock to socket */
1711 	unix_state_lock(tsk);
1712 	newsock->state = SS_CONNECTED;
1713 	unix_sock_inherit_flags(sock, newsock);
1714 	sock_graft(tsk, newsock);
1715 	unix_state_unlock(tsk);
1716 	return 0;
1717 
1718 out:
1719 	return err;
1720 }
1721 
1722 
1723 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1724 {
1725 	struct sock *sk = sock->sk;
1726 	struct unix_address *addr;
1727 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1728 	int err = 0;
1729 
1730 	if (peer) {
1731 		sk = unix_peer_get(sk);
1732 
1733 		err = -ENOTCONN;
1734 		if (!sk)
1735 			goto out;
1736 		err = 0;
1737 	} else {
1738 		sock_hold(sk);
1739 	}
1740 
1741 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1742 	if (!addr) {
1743 		sunaddr->sun_family = AF_UNIX;
1744 		sunaddr->sun_path[0] = 0;
1745 		err = offsetof(struct sockaddr_un, sun_path);
1746 	} else {
1747 		err = addr->len;
1748 		memcpy(sunaddr, addr->name, addr->len);
1749 	}
1750 	sock_put(sk);
1751 out:
1752 	return err;
1753 }
1754 
1755 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1756 {
1757 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1758 
1759 	/*
1760 	 * Garbage collection of unix sockets starts by selecting a set of
1761 	 * candidate sockets which have references only from being in flight
1762 	 * (total_refs == inflight_refs).  This condition is checked once during
1763 	 * the candidate collection phase, and candidates are marked as such, so
1764 	 * that non-candidates can later be ignored.  While inflight_refs is
1765 	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1766 	 * is an instantaneous decision.
1767 	 *
1768 	 * Once a candidate, however, the socket must not be reinstalled into a
1769 	 * file descriptor while the garbage collection is in progress.
1770 	 *
1771 	 * If the above conditions are met, then the directed graph of
1772 	 * candidates (*) does not change while unix_gc_lock is held.
1773 	 *
1774 	 * Any operation that changes the file count through file descriptors
1775 	 * (dup, close, sendmsg) does not change the graph since candidates are
1776 	 * not installed in fds.
1777 	 *
1778 	 * Dequeuing a candidate via recvmsg would install it into an fd, but
1779 	 * that takes unix_gc_lock to decrement the inflight count, so it's
1780 	 * serialized with garbage collection.
1781 	 *
1782 	 * MSG_PEEK is special in that it does not change the inflight count,
1783 	 * yet does install the socket into an fd.  The following lock/unlock
1784 	 * pair is to ensure serialization with garbage collection.  It must be
1785 	 * done between incrementing the file count and installing the file into
1786 	 * an fd.
1787 	 *
1788 	 * If garbage collection starts after the barrier provided by the
1789 	 * lock/unlock, then it will see the elevated refcount and not mark this
1790 	 * as a candidate.  If a garbage collection is already in progress
1791 	 * before the file count was incremented, then the lock/unlock pair will
1792 	 * ensure that garbage collection is finished before progressing to
1793 	 * installing the fd.
1794 	 *
1795 	 * (*) A -> B where B is on the queue of A or B is on the queue of C
1796 	 * which is on the queue of listening socket A.
1797 	 */
1798 	spin_lock(&unix_gc_lock);
1799 	spin_unlock(&unix_gc_lock);
1800 }
1801 
1802 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1803 {
1804 	int err = 0;
1805 
1806 	UNIXCB(skb).pid  = get_pid(scm->pid);
1807 	UNIXCB(skb).uid = scm->creds.uid;
1808 	UNIXCB(skb).gid = scm->creds.gid;
1809 	UNIXCB(skb).fp = NULL;
1810 	unix_get_secdata(scm, skb);
1811 	if (scm->fp && send_fds)
1812 		err = unix_attach_fds(scm, skb);
1813 
1814 	skb->destructor = unix_destruct_scm;
1815 	return err;
1816 }
1817 
1818 static bool unix_passcred_enabled(const struct socket *sock,
1819 				  const struct sock *other)
1820 {
1821 	return test_bit(SOCK_PASSCRED, &sock->flags) ||
1822 	       !other->sk_socket ||
1823 	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1824 }
1825 
1826 /*
1827  * Some apps rely on write() giving SCM_CREDENTIALS.
1828  * We include credentials if the source or destination socket
1829  * asserted SOCK_PASSCRED.
1830  */
1831 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1832 			    const struct sock *other)
1833 {
1834 	if (UNIXCB(skb).pid)
1835 		return;
1836 	if (unix_passcred_enabled(sock, other)) {
1837 		UNIXCB(skb).pid  = get_pid(task_tgid(current));
1838 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1839 	}
1840 }
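
/* Editorial aside -- how the credentials attached above surface in
 * userspace (a hedged sketch using only the documented cmsg API).  The
 * receiver opts in with SO_PASSCRED and then walks the control data:
 *
 *	int on = 1;
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// ...after recvmsg(fd, &msg, 0):
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_SOCKET &&
 *		    c->cmsg_type == SCM_CREDENTIALS) {
 *			struct ucred uc;
 *
 *			memcpy(&uc, CMSG_DATA(c), sizeof(uc));
 *			// uc.pid, uc.uid, uc.gid describe the sender
 *		}
 */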
1841 
1842 static bool unix_skb_scm_eq(struct sk_buff *skb,
1843 			    struct scm_cookie *scm)
1844 {
1845 	return UNIXCB(skb).pid == scm->pid &&
1846 	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1847 	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1848 	       unix_secdata_eq(scm, skb);
1849 }
1850 
1851 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1852 {
1853 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1854 	struct unix_sock *u = unix_sk(sk);
1855 
1856 	if (unlikely(fp && fp->count))
1857 		atomic_add(fp->count, &u->scm_stat.nr_fds);
1858 }
1859 
1860 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1861 {
1862 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1863 	struct unix_sock *u = unix_sk(sk);
1864 
1865 	if (unlikely(fp && fp->count))
1866 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
1867 }
1868 
1869 /*
1870  *	Send AF_UNIX data.
1871  */
1872 
1873 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1874 			      size_t len)
1875 {
1876 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1877 	struct sock *sk = sock->sk, *other = NULL;
1878 	struct unix_sock *u = unix_sk(sk);
1879 	struct scm_cookie scm;
1880 	struct sk_buff *skb;
1881 	int data_len = 0;
1882 	int sk_locked;
1883 	long timeo;
1884 	int err;
1885 
1886 	wait_for_unix_gc();
1887 	err = scm_send(sock, msg, &scm, false);
1888 	if (err < 0)
1889 		return err;
1890 
1891 	err = -EOPNOTSUPP;
1892 	if (msg->msg_flags&MSG_OOB)
1893 		goto out;
1894 
1895 	if (msg->msg_namelen) {
1896 		err = unix_validate_addr(sunaddr, msg->msg_namelen);
1897 		if (err)
1898 			goto out;
1899 	} else {
1900 		sunaddr = NULL;
1901 		err = -ENOTCONN;
1902 		other = unix_peer_get(sk);
1903 		if (!other)
1904 			goto out;
1905 	}
1906 
1907 	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1908 		err = unix_autobind(sk);
1909 		if (err)
1910 			goto out;
1911 	}
1912 
1913 	err = -EMSGSIZE;
1914 	if (len > sk->sk_sndbuf - 32)
1915 		goto out;
1916 
1917 	if (len > SKB_MAX_ALLOC) {
1918 		data_len = min_t(size_t,
1919 				 len - SKB_MAX_ALLOC,
1920 				 MAX_SKB_FRAGS * PAGE_SIZE);
1921 		data_len = PAGE_ALIGN(data_len);
1922 
1923 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1924 	}
1925 
1926 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1927 				   msg->msg_flags & MSG_DONTWAIT, &err,
1928 				   PAGE_ALLOC_COSTLY_ORDER);
1929 	if (skb == NULL)
1930 		goto out;
1931 
1932 	err = unix_scm_to_skb(&scm, skb, true);
1933 	if (err < 0)
1934 		goto out_free;
1935 
1936 	skb_put(skb, len - data_len);
1937 	skb->data_len = data_len;
1938 	skb->len = len;
1939 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1940 	if (err)
1941 		goto out_free;
1942 
1943 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1944 
1945 restart:
1946 	if (!other) {
1947 		err = -ECONNRESET;
1948 		if (sunaddr == NULL)
1949 			goto out_free;
1950 
1951 		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1952 					sk->sk_type);
1953 		if (IS_ERR(other)) {
1954 			err = PTR_ERR(other);
1955 			other = NULL;
1956 			goto out_free;
1957 		}
1958 	}
1959 
1960 	if (sk_filter(other, skb) < 0) {
1961 		/* Toss the packet but do not return any error to the sender */
1962 		err = len;
1963 		goto out_free;
1964 	}
1965 
1966 	sk_locked = 0;
1967 	unix_state_lock(other);
1968 restart_locked:
1969 	err = -EPERM;
1970 	if (!unix_may_send(sk, other))
1971 		goto out_unlock;
1972 
1973 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
1974 		/*
1975 		 *	Check with 1003.1g - what should a
1976 		 *	datagram error return here?
1977 		 */
1978 		unix_state_unlock(other);
1979 		sock_put(other);
1980 
1981 		if (!sk_locked)
1982 			unix_state_lock(sk);
1983 
1984 		err = 0;
1985 		if (sk->sk_type == SOCK_SEQPACKET) {
1986 			/* We are here only when racing with unix_release_sock()
1987 			 * is clearing @other. Never change state to TCP_CLOSE
1988 			 * unlike SOCK_DGRAM wants.
1989 			 */
1990 			unix_state_unlock(sk);
1991 			err = -EPIPE;
1992 		} else if (unix_peer(sk) == other) {
1993 			unix_peer(sk) = NULL;
1994 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1995 
1996 			sk->sk_state = TCP_CLOSE;
1997 			unix_state_unlock(sk);
1998 
1999 			unix_dgram_disconnected(sk, other);
2000 			sock_put(other);
2001 			err = -ECONNREFUSED;
2002 		} else {
2003 			unix_state_unlock(sk);
2004 		}
2005 
2006 		other = NULL;
2007 		if (err)
2008 			goto out_free;
2009 		goto restart;
2010 	}
2011 
2012 	err = -EPIPE;
2013 	if (other->sk_shutdown & RCV_SHUTDOWN)
2014 		goto out_unlock;
2015 
2016 	if (sk->sk_type != SOCK_SEQPACKET) {
2017 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2018 		if (err)
2019 			goto out_unlock;
2020 	}
2021 
2022 	/* other == sk && unix_peer(other) != sk if
2023 	 * - unix_peer(sk) == NULL and the destination was bound to sk, or
2024 	 * - unix_peer(sk) == sk at the get, but disconnected before the lock
2025 	 */
2026 	if (other != sk &&
2027 	    unlikely(unix_peer(other) != sk &&
2028 	    unix_recvq_full_lockless(other))) {
2029 		if (timeo) {
2030 			timeo = unix_wait_for_peer(other, timeo);
2031 
2032 			err = sock_intr_errno(timeo);
2033 			if (signal_pending(current))
2034 				goto out_free;
2035 
2036 			goto restart;
2037 		}
2038 
2039 		if (!sk_locked) {
2040 			unix_state_unlock(other);
2041 			unix_state_double_lock(sk, other);
2042 		}
2043 
2044 		if (unix_peer(sk) != other ||
2045 		    unix_dgram_peer_wake_me(sk, other)) {
2046 			err = -EAGAIN;
2047 			sk_locked = 1;
2048 			goto out_unlock;
2049 		}
2050 
2051 		if (!sk_locked) {
2052 			sk_locked = 1;
2053 			goto restart_locked;
2054 		}
2055 	}
2056 
2057 	if (unlikely(sk_locked))
2058 		unix_state_unlock(sk);
2059 
2060 	if (sock_flag(other, SOCK_RCVTSTAMP))
2061 		__net_timestamp(skb);
2062 	maybe_add_creds(skb, sock, other);
2063 	scm_stat_add(other, skb);
2064 	skb_queue_tail(&other->sk_receive_queue, skb);
2065 	unix_state_unlock(other);
2066 	other->sk_data_ready(other);
2067 	sock_put(other);
2068 	scm_destroy(&scm);
2069 	return len;
2070 
2071 out_unlock:
2072 	if (sk_locked)
2073 		unix_state_unlock(sk);
2074 	unix_state_unlock(other);
2075 out_free:
2076 	kfree_skb(skb);
2077 out:
2078 	if (other)
2079 		sock_put(other);
2080 	scm_destroy(&scm);
2081 	return err;
2082 }
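
/*
 * Illustrative userspace sketch (not part of this file; "sock_fd" is an
 * assumed datagram socket and "/tmp/peer.sock" an assumed bound path):
 * the backlog handling above is what a non-blocking sender observes
 * when the receiver's queue is full.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	ssize_t n;
 *
 *	strncpy(sun.sun_path, "/tmp/peer.sock", sizeof(sun.sun_path) - 1);
 *	n = sendto(sock_fd, "ping", 4, MSG_DONTWAIT,
 *		   (struct sockaddr *)&sun, sizeof(sun));
 *
 * A -1/EAGAIN result here means the receiver's backlog is full; the
 * sender should poll for EPOLLOUT and retry, and the peer-wake
 * machinery above wakes it once the receiver drains its queue.
 */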
2083 
2084 /* We use paged skbs for stream sockets, limiting occupancy to 32768
2085  * bytes with a minimum of a full page.
2086  */
2087 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
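/* For example, with 4 KiB pages (an assumption; PAGE_SIZE is per-arch)
 * get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768;
 * architectures with pages larger than 32 KiB round the same budget up
 * to a single page.
 */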
2088 
2089 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2090 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2091 		     struct scm_cookie *scm, bool fds_sent)
2092 {
2093 	struct unix_sock *ousk = unix_sk(other);
2094 	struct sk_buff *skb;
2095 	int err = 0;
2096 
2097 	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2098 
2099 	if (!skb)
2100 		return err;
2101 
2102 	err = unix_scm_to_skb(scm, skb, !fds_sent);
2103 	if (err < 0) {
2104 		kfree_skb(skb);
2105 		return err;
2106 	}
2107 	skb_put(skb, 1);
2108 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2109 
2110 	if (err) {
2111 		kfree_skb(skb);
2112 		return err;
2113 	}
2114 
2115 	unix_state_lock(other);
2116 
2117 	if (sock_flag(other, SOCK_DEAD) ||
2118 	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2119 		unix_state_unlock(other);
2120 		kfree_skb(skb);
2121 		return -EPIPE;
2122 	}
2123 
2124 	maybe_add_creds(skb, sock, other);
2125 	skb_get(skb);
2126 
2127 	if (ousk->oob_skb)
2128 		consume_skb(ousk->oob_skb);
2129 
2130 	WRITE_ONCE(ousk->oob_skb, skb);
2131 
2132 	scm_stat_add(other, skb);
2133 	skb_queue_tail(&other->sk_receive_queue, skb);
2134 	sk_send_sigurg(other);
2135 	unix_state_unlock(other);
2136 	other->sk_data_ready(other);
2137 
2138 	return err;
2139 }
2140 #endif
2141 
2142 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2143 			       size_t len)
2144 {
2145 	struct sock *sk = sock->sk;
2146 	struct sock *other = NULL;
2147 	int err, size;
2148 	struct sk_buff *skb;
2149 	int sent = 0;
2150 	struct scm_cookie scm;
2151 	bool fds_sent = false;
2152 	int data_len;
2153 
2154 	wait_for_unix_gc();
2155 	err = scm_send(sock, msg, &scm, false);
2156 	if (err < 0)
2157 		return err;
2158 
2159 	err = -EOPNOTSUPP;
2160 	if (msg->msg_flags & MSG_OOB) {
2161 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2162 		if (len)
2163 			len--;
2164 		else
2165 #endif
2166 			goto out_err;
2167 	}
2168 
2169 	if (msg->msg_namelen) {
2170 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2171 		goto out_err;
2172 	} else {
2173 		err = -ENOTCONN;
2174 		other = unix_peer(sk);
2175 		if (!other)
2176 			goto out_err;
2177 	}
2178 
2179 	if (sk->sk_shutdown & SEND_SHUTDOWN)
2180 		goto pipe_err;
2181 
2182 	while (sent < len) {
2183 		size = len - sent;
2184 
2185 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2186 			skb = sock_alloc_send_pskb(sk, 0, 0,
2187 						   msg->msg_flags & MSG_DONTWAIT,
2188 						   &err, 0);
2189 		} else {
2190 			/* Keep two messages in the pipe so it schedules better */
2191 			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2192 
2193 			/* allow fallback to order-0 allocations */
2194 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2195 
2196 			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2197 
2198 			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2199 
2200 			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2201 						   msg->msg_flags & MSG_DONTWAIT, &err,
2202 						   get_order(UNIX_SKB_FRAGS_SZ));
2203 		}
2204 		if (!skb)
2205 			goto out_err;
2206 
2207 		/* Only send the fds in the first buffer */
2208 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2209 		if (err < 0) {
2210 			kfree_skb(skb);
2211 			goto out_err;
2212 		}
2213 		fds_sent = true;
2214 
2215 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2216 			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2217 						   sk->sk_allocation);
2218 			if (err < 0) {
2219 				kfree_skb(skb);
2220 				goto out_err;
2221 			}
2222 			size = err;
2223 			refcount_add(size, &sk->sk_wmem_alloc);
2224 		} else {
2225 			skb_put(skb, size - data_len);
2226 			skb->data_len = data_len;
2227 			skb->len = size;
2228 			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2229 			if (err) {
2230 				kfree_skb(skb);
2231 				goto out_err;
2232 			}
2233 		}
2234 
2235 		unix_state_lock(other);
2236 
2237 		if (sock_flag(other, SOCK_DEAD) ||
2238 		    (other->sk_shutdown & RCV_SHUTDOWN))
2239 			goto pipe_err_free;
2240 
2241 		maybe_add_creds(skb, sock, other);
2242 		scm_stat_add(other, skb);
2243 		skb_queue_tail(&other->sk_receive_queue, skb);
2244 		unix_state_unlock(other);
2245 		other->sk_data_ready(other);
2246 		sent += size;
2247 	}
2248 
2249 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2250 	if (msg->msg_flags & MSG_OOB) {
2251 		err = queue_oob(sock, msg, other, &scm, fds_sent);
2252 		if (err)
2253 			goto out_err;
2254 		sent++;
2255 	}
2256 #endif
2257 
2258 	scm_destroy(&scm);
2259 
2260 	return sent;
2261 
2262 pipe_err_free:
2263 	unix_state_unlock(other);
2264 	kfree_skb(skb);
2265 pipe_err:
2266 	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2267 		send_sig(SIGPIPE, current, 0);
2268 	err = -EPIPE;
2269 out_err:
2270 	scm_destroy(&scm);
2271 	return sent ? : err;
2272 }
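
/*
 * Illustrative userspace sketch (needs CONFIG_AF_UNIX_OOB; "stream_fd"
 * is an assumed connected stream socket): with MSG_OOB the last byte of
 * the buffer is diverted through queue_oob() above and the peer gets
 * SIGURG, while everything before it is sent in-band.
 *
 *	send(stream_fd, "abc", 3, MSG_OOB);
 *
 * Here "ab" travels in the ordinary byte stream and 'c' becomes the
 * out-of-band byte, readable with recv(..., MSG_OOB) on the other end.
 */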
2273 
2274 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
2275 				    int offset, size_t size, int flags)
2276 {
2277 	struct bio_vec bvec;
2278 	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
2279 
2280 	if (flags & MSG_SENDPAGE_NOTLAST)
2281 		msg.msg_flags |= MSG_MORE;
2282 
2283 	bvec_set_page(&bvec, page, size, offset);
2284 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
2285 	return unix_stream_sendmsg(socket, &msg, size);
2286 }
2287 
2288 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2289 				  size_t len)
2290 {
2291 	int err;
2292 	struct sock *sk = sock->sk;
2293 
2294 	err = sock_error(sk);
2295 	if (err)
2296 		return err;
2297 
2298 	if (sk->sk_state != TCP_ESTABLISHED)
2299 		return -ENOTCONN;
2300 
2301 	if (msg->msg_namelen)
2302 		msg->msg_namelen = 0;
2303 
2304 	return unix_dgram_sendmsg(sock, msg, len);
2305 }
2306 
2307 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2308 				  size_t size, int flags)
2309 {
2310 	struct sock *sk = sock->sk;
2311 
2312 	if (sk->sk_state != TCP_ESTABLISHED)
2313 		return -ENOTCONN;
2314 
2315 	return unix_dgram_recvmsg(sock, msg, size, flags);
2316 }
2317 
2318 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2319 {
2320 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2321 
2322 	if (addr) {
2323 		msg->msg_namelen = addr->len;
2324 		memcpy(msg->msg_name, addr->name, addr->len);
2325 	}
2326 }
2327 
2328 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2329 			 int flags)
2330 {
2331 	struct scm_cookie scm;
2332 	struct socket *sock = sk->sk_socket;
2333 	struct unix_sock *u = unix_sk(sk);
2334 	struct sk_buff *skb, *last;
2335 	long timeo;
2336 	int skip;
2337 	int err;
2338 
2339 	err = -EOPNOTSUPP;
2340 	if (flags&MSG_OOB)
2341 		goto out;
2342 
2343 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2344 
2345 	do {
2346 		mutex_lock(&u->iolock);
2347 
2348 		skip = sk_peek_offset(sk, flags);
2349 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2350 					      &skip, &err, &last);
2351 		if (skb) {
2352 			if (!(flags & MSG_PEEK))
2353 				scm_stat_del(sk, skb);
2354 			break;
2355 		}
2356 
2357 		mutex_unlock(&u->iolock);
2358 
2359 		if (err != -EAGAIN)
2360 			break;
2361 	} while (timeo &&
2362 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2363 					      &err, &timeo, last));
2364 
2365 	if (!skb) { /* implies iolock unlocked */
2366 		unix_state_lock(sk);
2367 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2368 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2369 		    (sk->sk_shutdown & RCV_SHUTDOWN))
2370 			err = 0;
2371 		unix_state_unlock(sk);
2372 		goto out;
2373 	}
2374 
2375 	if (wq_has_sleeper(&u->peer_wait))
2376 		wake_up_interruptible_sync_poll(&u->peer_wait,
2377 						EPOLLOUT | EPOLLWRNORM |
2378 						EPOLLWRBAND);
2379 
2380 	if (msg->msg_name)
2381 		unix_copy_addr(msg, skb->sk);
2382 
2383 	if (size > skb->len - skip)
2384 		size = skb->len - skip;
2385 	else if (size < skb->len - skip)
2386 		msg->msg_flags |= MSG_TRUNC;
2387 
2388 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2389 	if (err)
2390 		goto out_free;
2391 
2392 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2393 		__sock_recv_timestamp(msg, sk, skb);
2394 
2395 	memset(&scm, 0, sizeof(scm));
2396 
2397 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2398 	unix_set_secdata(&scm, skb);
2399 
2400 	if (!(flags & MSG_PEEK)) {
2401 		if (UNIXCB(skb).fp)
2402 			unix_detach_fds(&scm, skb);
2403 
2404 		sk_peek_offset_bwd(sk, skb->len);
2405 	} else {
2406 		/* It is questionable: on PEEK we could:
2407 		   - not return fds - good, but too simple 8)
2408 		   - return fds, and not return them on read (old strategy,
2409 		     apparently wrong)
2410 		   - clone fds (I chose this for now, as it is the most
2411 		     universal solution)
2412 
2413 		   POSIX 1003.1g does not actually define this clearly
2414 		   at all. POSIX 1003.1g doesn't define a lot of things
2415 		   clearly however!
2416 
2417 		*/
2418 
2419 		sk_peek_offset_fwd(sk, size);
2420 
2421 		if (UNIXCB(skb).fp)
2422 			unix_peek_fds(&scm, skb);
2423 	}
2424 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2425 
2426 	scm_recv(sock, msg, &scm, flags);
2427 
2428 out_free:
2429 	skb_free_datagram(sk, skb);
2430 	mutex_unlock(&u->iolock);
2431 out:
2432 	return err;
2433 }
2434 
2435 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2436 			      int flags)
2437 {
2438 	struct sock *sk = sock->sk;
2439 
2440 #ifdef CONFIG_BPF_SYSCALL
2441 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2442 
2443 	if (prot != &unix_dgram_proto)
2444 		return prot->recvmsg(sk, msg, size, flags, NULL);
2445 #endif
2446 	return __unix_dgram_recvmsg(sk, msg, size, flags);
2447 }
2448 
2449 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2450 {
2451 	struct unix_sock *u = unix_sk(sk);
2452 	struct sk_buff *skb;
2453 	int err;
2454 
2455 	mutex_lock(&u->iolock);
2456 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2457 	mutex_unlock(&u->iolock);
2458 	if (!skb)
2459 		return err;
2460 
2461 	return recv_actor(sk, skb);
2462 }
2463 
2464 /*
2465  *	Sleep until more data has arrived. But check for races.
2466  */
2467 static long unix_stream_data_wait(struct sock *sk, long timeo,
2468 				  struct sk_buff *last, unsigned int last_len,
2469 				  bool freezable)
2470 {
2471 	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2472 	struct sk_buff *tail;
2473 	DEFINE_WAIT(wait);
2474 
2475 	unix_state_lock(sk);
2476 
2477 	for (;;) {
2478 		prepare_to_wait(sk_sleep(sk), &wait, state);
2479 
2480 		tail = skb_peek_tail(&sk->sk_receive_queue);
2481 		if (tail != last ||
2482 		    (tail && tail->len != last_len) ||
2483 		    sk->sk_err ||
2484 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2485 		    signal_pending(current) ||
2486 		    !timeo)
2487 			break;
2488 
2489 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2490 		unix_state_unlock(sk);
2491 		timeo = schedule_timeout(timeo);
2492 		unix_state_lock(sk);
2493 
2494 		if (sock_flag(sk, SOCK_DEAD))
2495 			break;
2496 
2497 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2498 	}
2499 
2500 	finish_wait(sk_sleep(sk), &wait);
2501 	unix_state_unlock(sk);
2502 	return timeo;
2503 }
2504 
2505 static unsigned int unix_skb_len(const struct sk_buff *skb)
2506 {
2507 	return skb->len - UNIXCB(skb).consumed;
2508 }
2509 
2510 struct unix_stream_read_state {
2511 	int (*recv_actor)(struct sk_buff *, int, int,
2512 			  struct unix_stream_read_state *);
2513 	struct socket *socket;
2514 	struct msghdr *msg;
2515 	struct pipe_inode_info *pipe;
2516 	size_t size;
2517 	int flags;
2518 	unsigned int splice_flags;
2519 };
2520 
2521 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2522 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2523 {
2524 	struct socket *sock = state->socket;
2525 	struct sock *sk = sock->sk;
2526 	struct unix_sock *u = unix_sk(sk);
2527 	int chunk = 1;
2528 	struct sk_buff *oob_skb;
2529 
2530 	mutex_lock(&u->iolock);
2531 	unix_state_lock(sk);
2532 
2533 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2534 		unix_state_unlock(sk);
2535 		mutex_unlock(&u->iolock);
2536 		return -EINVAL;
2537 	}
2538 
2539 	oob_skb = u->oob_skb;
2540 
2541 	if (!(state->flags & MSG_PEEK))
2542 		WRITE_ONCE(u->oob_skb, NULL);
2543 
2544 	unix_state_unlock(sk);
2545 
2546 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2547 
2548 	if (!(state->flags & MSG_PEEK)) {
2549 		UNIXCB(oob_skb).consumed += 1;
2550 		kfree_skb(oob_skb);
2551 	}
2552 
2553 	mutex_unlock(&u->iolock);
2554 
2555 	if (chunk < 0)
2556 		return -EFAULT;
2557 
2558 	state->msg->msg_flags |= MSG_OOB;
2559 	return 1;
2560 }
2561 
2562 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2563 				  int flags, int copied)
2564 {
2565 	struct unix_sock *u = unix_sk(sk);
2566 
2567 	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2568 		skb_unlink(skb, &sk->sk_receive_queue);
2569 		consume_skb(skb);
2570 		skb = NULL;
2571 	} else {
2572 		if (skb == u->oob_skb) {
2573 			if (copied) {
2574 				skb = NULL;
2575 			} else if (sock_flag(sk, SOCK_URGINLINE)) {
2576 				if (!(flags & MSG_PEEK)) {
2577 					WRITE_ONCE(u->oob_skb, NULL);
2578 					consume_skb(skb);
2579 				}
2580 			} else if (!(flags & MSG_PEEK)) {
2581 				skb_unlink(skb, &sk->sk_receive_queue);
2582 				consume_skb(skb);
2583 				skb = skb_peek(&sk->sk_receive_queue);
2584 			}
2585 		}
2586 	}
2587 	return skb;
2588 }
2589 #endif
2590 
2591 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2592 {
2593 	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2594 		return -ENOTCONN;
2595 
2596 	return unix_read_skb(sk, recv_actor);
2597 }
2598 
2599 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2600 				    bool freezable)
2601 {
2602 	struct scm_cookie scm;
2603 	struct socket *sock = state->socket;
2604 	struct sock *sk = sock->sk;
2605 	struct unix_sock *u = unix_sk(sk);
2606 	int copied = 0;
2607 	int flags = state->flags;
2608 	int noblock = flags & MSG_DONTWAIT;
2609 	bool check_creds = false;
2610 	int target;
2611 	int err = 0;
2612 	long timeo;
2613 	int skip;
2614 	size_t size = state->size;
2615 	unsigned int last_len;
2616 
2617 	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2618 		err = -EINVAL;
2619 		goto out;
2620 	}
2621 
2622 	if (unlikely(flags & MSG_OOB)) {
2623 		err = -EOPNOTSUPP;
2624 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2625 		err = unix_stream_recv_urg(state);
2626 #endif
2627 		goto out;
2628 	}
2629 
2630 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2631 	timeo = sock_rcvtimeo(sk, noblock);
2632 
2633 	memset(&scm, 0, sizeof(scm));
2634 
2635 	/* Lock the socket to prevent the queue from being reordered
2636 	 * while we sleep copying data out to the message
2637 	 */
2638 	mutex_lock(&u->iolock);
2639 
2640 	skip = max(sk_peek_offset(sk, flags), 0);
2641 
2642 	do {
2643 		int chunk;
2644 		bool drop_skb;
2645 		struct sk_buff *skb, *last;
2646 
2647 redo:
2648 		unix_state_lock(sk);
2649 		if (sock_flag(sk, SOCK_DEAD)) {
2650 			err = -ECONNRESET;
2651 			goto unlock;
2652 		}
2653 		last = skb = skb_peek(&sk->sk_receive_queue);
2654 		last_len = last ? last->len : 0;
2655 
2656 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2657 		if (skb) {
2658 			skb = manage_oob(skb, sk, flags, copied);
2659 			if (!skb) {
2660 				unix_state_unlock(sk);
2661 				if (copied)
2662 					break;
2663 				goto redo;
2664 			}
2665 		}
2666 #endif
2667 again:
2668 		if (skb == NULL) {
2669 			if (copied >= target)
2670 				goto unlock;
2671 
2672 			/*
2673 			 *	POSIX 1003.1g mandates this order.
2674 			 */
2675 
2676 			err = sock_error(sk);
2677 			if (err)
2678 				goto unlock;
2679 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2680 				goto unlock;
2681 
2682 			unix_state_unlock(sk);
2683 			if (!timeo) {
2684 				err = -EAGAIN;
2685 				break;
2686 			}
2687 
2688 			mutex_unlock(&u->iolock);
2689 
2690 			timeo = unix_stream_data_wait(sk, timeo, last,
2691 						      last_len, freezable);
2692 
2693 			if (signal_pending(current)) {
2694 				err = sock_intr_errno(timeo);
2695 				scm_destroy(&scm);
2696 				goto out;
2697 			}
2698 
2699 			mutex_lock(&u->iolock);
2700 			goto redo;
2701 unlock:
2702 			unix_state_unlock(sk);
2703 			break;
2704 		}
2705 
2706 		while (skip >= unix_skb_len(skb)) {
2707 			skip -= unix_skb_len(skb);
2708 			last = skb;
2709 			last_len = skb->len;
2710 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2711 			if (!skb)
2712 				goto again;
2713 		}
2714 
2715 		unix_state_unlock(sk);
2716 
2717 		if (check_creds) {
2718 			/* Never glue messages from different writers */
2719 			if (!unix_skb_scm_eq(skb, &scm))
2720 				break;
2721 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2722 			/* Copy credentials */
2723 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2724 			unix_set_secdata(&scm, skb);
2725 			check_creds = true;
2726 		}
2727 
2728 		/* Copy address just once */
2729 		if (state->msg && state->msg->msg_name) {
2730 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2731 					 state->msg->msg_name);
2732 			unix_copy_addr(state->msg, skb->sk);
2733 			sunaddr = NULL;
2734 		}
2735 
2736 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2737 		skb_get(skb);
2738 		chunk = state->recv_actor(skb, skip, chunk, state);
2739 		drop_skb = !unix_skb_len(skb);
2740 		/* skb is only safe to use if !drop_skb */
2741 		consume_skb(skb);
2742 		if (chunk < 0) {
2743 			if (copied == 0)
2744 				copied = -EFAULT;
2745 			break;
2746 		}
2747 		copied += chunk;
2748 		size -= chunk;
2749 
2750 		if (drop_skb) {
2751 			/* the skb was touched by a concurrent reader;
2752 			 * we should not expect anything from this skb
2753 			 * anymore and assume it invalid - we can be
2754 			 * sure it was dropped from the socket queue
2755 			 *
2756 			 * let's report a short read
2757 			 */
2758 			err = 0;
2759 			break;
2760 		}
2761 
2762 		/* Mark read part of skb as used */
2763 		if (!(flags & MSG_PEEK)) {
2764 			UNIXCB(skb).consumed += chunk;
2765 
2766 			sk_peek_offset_bwd(sk, chunk);
2767 
2768 			if (UNIXCB(skb).fp) {
2769 				scm_stat_del(sk, skb);
2770 				unix_detach_fds(&scm, skb);
2771 			}
2772 
2773 			if (unix_skb_len(skb))
2774 				break;
2775 
2776 			skb_unlink(skb, &sk->sk_receive_queue);
2777 			consume_skb(skb);
2778 
2779 			if (scm.fp)
2780 				break;
2781 		} else {
2782 			/* It is questionable, see note in unix_dgram_recvmsg.
2783 			 */
2784 			if (UNIXCB(skb).fp)
2785 				unix_peek_fds(&scm, skb);
2786 
2787 			sk_peek_offset_fwd(sk, chunk);
2788 
2789 			if (UNIXCB(skb).fp)
2790 				break;
2791 
2792 			skip = 0;
2793 			last = skb;
2794 			last_len = skb->len;
2795 			unix_state_lock(sk);
2796 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2797 			if (skb)
2798 				goto again;
2799 			unix_state_unlock(sk);
2800 			break;
2801 		}
2802 	} while (size);
2803 
2804 	mutex_unlock(&u->iolock);
2805 	if (state->msg)
2806 		scm_recv(sock, state->msg, &scm, flags);
2807 	else
2808 		scm_destroy(&scm);
2809 out:
2810 	return copied ? : err;
2811 }
2812 
2813 static int unix_stream_read_actor(struct sk_buff *skb,
2814 				  int skip, int chunk,
2815 				  struct unix_stream_read_state *state)
2816 {
2817 	int ret;
2818 
2819 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2820 				    state->msg, chunk);
2821 	return ret ?: chunk;
2822 }
2823 
2824 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2825 			  size_t size, int flags)
2826 {
2827 	struct unix_stream_read_state state = {
2828 		.recv_actor = unix_stream_read_actor,
2829 		.socket = sk->sk_socket,
2830 		.msg = msg,
2831 		.size = size,
2832 		.flags = flags
2833 	};
2834 
2835 	return unix_stream_read_generic(&state, true);
2836 }
2837 
2838 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2839 			       size_t size, int flags)
2840 {
2841 	struct unix_stream_read_state state = {
2842 		.recv_actor = unix_stream_read_actor,
2843 		.socket = sock,
2844 		.msg = msg,
2845 		.size = size,
2846 		.flags = flags
2847 	};
2848 
2849 #ifdef CONFIG_BPF_SYSCALL
2850 	struct sock *sk = sock->sk;
2851 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2852 
2853 	if (prot != &unix_stream_proto)
2854 		return prot->recvmsg(sk, msg, size, flags, NULL);
2855 #endif
2856 	return unix_stream_read_generic(&state, true);
2857 }
2858 
2859 static int unix_stream_splice_actor(struct sk_buff *skb,
2860 				    int skip, int chunk,
2861 				    struct unix_stream_read_state *state)
2862 {
2863 	return skb_splice_bits(skb, state->socket->sk,
2864 			       UNIXCB(skb).consumed + skip,
2865 			       state->pipe, chunk, state->splice_flags);
2866 }
2867 
2868 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
2869 				       struct pipe_inode_info *pipe,
2870 				       size_t size, unsigned int flags)
2871 {
2872 	struct unix_stream_read_state state = {
2873 		.recv_actor = unix_stream_splice_actor,
2874 		.socket = sock,
2875 		.pipe = pipe,
2876 		.size = size,
2877 		.splice_flags = flags,
2878 	};
2879 
2880 	if (unlikely(*ppos))
2881 		return -ESPIPE;
2882 
2883 	if (sock->file->f_flags & O_NONBLOCK ||
2884 	    flags & SPLICE_F_NONBLOCK)
2885 		state.flags = MSG_DONTWAIT;
2886 
2887 	return unix_stream_read_generic(&state, false);
2888 }
2889 
2890 static int unix_shutdown(struct socket *sock, int mode)
2891 {
2892 	struct sock *sk = sock->sk;
2893 	struct sock *other;
2894 
2895 	if (mode < SHUT_RD || mode > SHUT_RDWR)
2896 		return -EINVAL;
2897 	/* This maps:
2898 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2899 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2900 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2901 	 */
2902 	++mode;
2903 
2904 	unix_state_lock(sk);
2905 	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2906 	other = unix_peer(sk);
2907 	if (other)
2908 		sock_hold(other);
2909 	unix_state_unlock(sk);
2910 	sk->sk_state_change(sk);
2911 
2912 	if (other &&
2913 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2914 
2915 		int peer_mode = 0;
2916 		const struct proto *prot = READ_ONCE(other->sk_prot);
2917 
2918 		if (prot->unhash)
2919 			prot->unhash(other);
2920 		if (mode&RCV_SHUTDOWN)
2921 			peer_mode |= SEND_SHUTDOWN;
2922 		if (mode&SEND_SHUTDOWN)
2923 			peer_mode |= RCV_SHUTDOWN;
2924 		unix_state_lock(other);
2925 		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2926 		unix_state_unlock(other);
2927 		other->sk_state_change(other);
2928 		if (peer_mode == SHUTDOWN_MASK)
2929 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2930 		else if (peer_mode & RCV_SHUTDOWN)
2931 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2932 	}
2933 	if (other)
2934 		sock_put(other);
2935 
2936 	return 0;
2937 }
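
/*
 * Illustrative userspace sketch ("a_fd"/"b_fd" are an assumed connected
 * stream pair): SHUT_WR on one end maps to RCV_SHUTDOWN on the peer, as
 * described in the mode-mapping above.
 *
 *	char buf[16];
 *
 *	shutdown(a_fd, SHUT_WR);
 *	read(b_fd, buf, sizeof(buf));
 *	write(b_fd, "still open", 10);
 *
 * The read() returns 0 (EOF) once queued data drains; the write() still
 * succeeds because the b -> a direction is unaffected until b shuts
 * down too.
 */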
2938 
2939 long unix_inq_len(struct sock *sk)
2940 {
2941 	struct sk_buff *skb;
2942 	long amount = 0;
2943 
2944 	if (sk->sk_state == TCP_LISTEN)
2945 		return -EINVAL;
2946 
2947 	spin_lock(&sk->sk_receive_queue.lock);
2948 	if (sk->sk_type == SOCK_STREAM ||
2949 	    sk->sk_type == SOCK_SEQPACKET) {
2950 		skb_queue_walk(&sk->sk_receive_queue, skb)
2951 			amount += unix_skb_len(skb);
2952 	} else {
2953 		skb = skb_peek(&sk->sk_receive_queue);
2954 		if (skb)
2955 			amount = skb->len;
2956 	}
2957 	spin_unlock(&sk->sk_receive_queue.lock);
2958 
2959 	return amount;
2960 }
2961 EXPORT_SYMBOL_GPL(unix_inq_len);
2962 
2963 long unix_outq_len(struct sock *sk)
2964 {
2965 	return sk_wmem_alloc_get(sk);
2966 }
2967 EXPORT_SYMBOL_GPL(unix_outq_len);
2968 
2969 static int unix_open_file(struct sock *sk)
2970 {
2971 	struct path path;
2972 	struct file *f;
2973 	int fd;
2974 
2975 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2976 		return -EPERM;
2977 
2978 	if (!smp_load_acquire(&unix_sk(sk)->addr))
2979 		return -ENOENT;
2980 
2981 	path = unix_sk(sk)->path;
2982 	if (!path.dentry)
2983 		return -ENOENT;
2984 
2985 	path_get(&path);
2986 
2987 	fd = get_unused_fd_flags(O_CLOEXEC);
2988 	if (fd < 0)
2989 		goto out;
2990 
2991 	f = dentry_open(&path, O_PATH, current_cred());
2992 	if (IS_ERR(f)) {
2993 		put_unused_fd(fd);
2994 		fd = PTR_ERR(f);
2995 		goto out;
2996 	}
2997 
2998 	fd_install(fd, f);
2999 out:
3000 	path_put(&path);
3001 
3002 	return fd;
3003 }
3004 
3005 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3006 {
3007 	struct sock *sk = sock->sk;
3008 	long amount = 0;
3009 	int err;
3010 
3011 	switch (cmd) {
3012 	case SIOCOUTQ:
3013 		amount = unix_outq_len(sk);
3014 		err = put_user(amount, (int __user *)arg);
3015 		break;
3016 	case SIOCINQ:
3017 		amount = unix_inq_len(sk);
3018 		if (amount < 0)
3019 			err = amount;
3020 		else
3021 			err = put_user(amount, (int __user *)arg);
3022 		break;
3023 	case SIOCUNIXFILE:
3024 		err = unix_open_file(sk);
3025 		break;
3026 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3027 	case SIOCATMARK:
3028 		{
3029 			struct sk_buff *skb;
3030 			int answ = 0;
3031 
3032 			skb = skb_peek(&sk->sk_receive_queue);
3033 			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3034 				answ = 1;
3035 			err = put_user(answ, (int __user *)arg);
3036 		}
3037 		break;
3038 #endif
3039 	default:
3040 		err = -ENOIOCTLCMD;
3041 		break;
3042 	}
3043 	return err;
3044 }
3045 
3046 #ifdef CONFIG_COMPAT
3047 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3048 {
3049 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3050 }
3051 #endif
3052 
3053 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3054 {
3055 	struct sock *sk = sock->sk;
3056 	__poll_t mask;
3057 	u8 shutdown;
3058 
3059 	sock_poll_wait(file, sock, wait);
3060 	mask = 0;
3061 	shutdown = READ_ONCE(sk->sk_shutdown);
3062 
3063 	/* exceptional events? */
3064 	if (READ_ONCE(sk->sk_err))
3065 		mask |= EPOLLERR;
3066 	if (shutdown == SHUTDOWN_MASK)
3067 		mask |= EPOLLHUP;
3068 	if (shutdown & RCV_SHUTDOWN)
3069 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3070 
3071 	/* readable? */
3072 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3073 		mask |= EPOLLIN | EPOLLRDNORM;
3074 	if (sk_is_readable(sk))
3075 		mask |= EPOLLIN | EPOLLRDNORM;
3076 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3077 	if (READ_ONCE(unix_sk(sk)->oob_skb))
3078 		mask |= EPOLLPRI;
3079 #endif
3080 
3081 	/* Connection-based sockets need to check for termination and startup */
3082 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3083 	    sk->sk_state == TCP_CLOSE)
3084 		mask |= EPOLLHUP;
3085 
3086 	/*
3087 	 * We also report the socket as writable when the other side has
3088 	 * shut down the connection. This prevents stuck sockets.
3089 	 */
3090 	if (unix_writable(sk))
3091 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3092 
3093 	return mask;
3094 }
3095 
3096 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3097 				    poll_table *wait)
3098 {
3099 	struct sock *sk = sock->sk, *other;
3100 	unsigned int writable;
3101 	__poll_t mask;
3102 	u8 shutdown;
3103 
3104 	sock_poll_wait(file, sock, wait);
3105 	mask = 0;
3106 	shutdown = READ_ONCE(sk->sk_shutdown);
3107 
3108 	/* exceptional events? */
3109 	if (READ_ONCE(sk->sk_err) ||
3110 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3111 		mask |= EPOLLERR |
3112 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3113 
3114 	if (shutdown & RCV_SHUTDOWN)
3115 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3116 	if (shutdown == SHUTDOWN_MASK)
3117 		mask |= EPOLLHUP;
3118 
3119 	/* readable? */
3120 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3121 		mask |= EPOLLIN | EPOLLRDNORM;
3122 	if (sk_is_readable(sk))
3123 		mask |= EPOLLIN | EPOLLRDNORM;
3124 
3125 	/* Connection-based sockets need to check for termination and startup */
3126 	if (sk->sk_type == SOCK_SEQPACKET) {
3127 		if (sk->sk_state == TCP_CLOSE)
3128 			mask |= EPOLLHUP;
3129 		/* connection hasn't started yet? */
3130 		if (sk->sk_state == TCP_SYN_SENT)
3131 			return mask;
3132 	}
3133 
3134 	/* No write status requested, avoid expensive OUT tests. */
3135 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3136 		return mask;
3137 
3138 	writable = unix_writable(sk);
3139 	if (writable) {
3140 		unix_state_lock(sk);
3141 
3142 		other = unix_peer(sk);
3143 		if (other && unix_peer(other) != sk &&
3144 		    unix_recvq_full_lockless(other) &&
3145 		    unix_dgram_peer_wake_me(sk, other))
3146 			writable = 0;
3147 
3148 		unix_state_unlock(sk);
3149 	}
3150 
3151 	if (writable)
3152 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3153 	else
3154 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3155 
3156 	return mask;
3157 }
3158 
3159 #ifdef CONFIG_PROC_FS
3160 
3161 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3162 
3163 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3164 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3165 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
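/* For example, on a 64-bit kernel with UNIX_HASH_BITS == 8 (an
 * assumption here, to make the arithmetic concrete) BUCKET_SPACE is
 * 64 - (8 + 1) - 1 = 54, so a position packs as (bucket << 54) | offset:
 * get_bucket(set_bucket_offset(3, 5)) == 3 and
 * get_offset(set_bucket_offset(3, 5)) == 5.
 */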
3166 
3167 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3168 {
3169 	unsigned long offset = get_offset(*pos);
3170 	unsigned long bucket = get_bucket(*pos);
3171 	unsigned long count = 0;
3172 	struct sock *sk;
3173 
3174 	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3175 	     sk; sk = sk_next(sk)) {
3176 		if (++count == offset)
3177 			break;
3178 	}
3179 
3180 	return sk;
3181 }
3182 
3183 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3184 {
3185 	unsigned long bucket = get_bucket(*pos);
3186 	struct net *net = seq_file_net(seq);
3187 	struct sock *sk;
3188 
3189 	while (bucket < UNIX_HASH_SIZE) {
3190 		spin_lock(&net->unx.table.locks[bucket]);
3191 
3192 		sk = unix_from_bucket(seq, pos);
3193 		if (sk)
3194 			return sk;
3195 
3196 		spin_unlock(&net->unx.table.locks[bucket]);
3197 
3198 		*pos = set_bucket_offset(++bucket, 1);
3199 	}
3200 
3201 	return NULL;
3202 }
3203 
3204 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3205 				  loff_t *pos)
3206 {
3207 	unsigned long bucket = get_bucket(*pos);
3208 
3209 	sk = sk_next(sk);
3210 	if (sk)
3211 		return sk;
3212 
3214 	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3215 
3216 	*pos = set_bucket_offset(++bucket, 1);
3217 
3218 	return unix_get_first(seq, pos);
3219 }
3220 
3221 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3222 {
3223 	if (!*pos)
3224 		return SEQ_START_TOKEN;
3225 
3226 	return unix_get_first(seq, pos);
3227 }
3228 
3229 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3230 {
3231 	++*pos;
3232 
3233 	if (v == SEQ_START_TOKEN)
3234 		return unix_get_first(seq, pos);
3235 
3236 	return unix_get_next(seq, v, pos);
3237 }
3238 
3239 static void unix_seq_stop(struct seq_file *seq, void *v)
3240 {
3241 	struct sock *sk = v;
3242 
3243 	if (sk)
3244 		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3245 }
3246 
3247 static int unix_seq_show(struct seq_file *seq, void *v)
3248 {
3250 	if (v == SEQ_START_TOKEN) {
3251 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3252 			 "Inode Path\n");
3253 	} else {
3254 		struct sock *s = v;
3255 		struct unix_sock *u = unix_sk(s);
3256 		unix_state_lock(s);
3257 
3258 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3259 			s,
3260 			refcount_read(&s->sk_refcnt),
3261 			0,
3262 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3263 			s->sk_type,
3264 			s->sk_socket ?
3265 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3266 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3267 			sock_i_ino(s));
3268 
3269 		if (u->addr) {	/* under a hash table lock here */
3270 			int i, len;
3271 			seq_putc(seq, ' ');
3272 
3273 			i = 0;
3274 			len = u->addr->len -
3275 				offsetof(struct sockaddr_un, sun_path);
3276 			if (u->addr->name->sun_path[0]) {
3277 				len--;
3278 			} else {
3279 				seq_putc(seq, '@');
3280 				i++;
3281 			}
3282 			for ( ; i < len; i++)
3283 				seq_putc(seq, u->addr->name->sun_path[i] ?:
3284 					 '@');
3285 		}
3286 		unix_state_unlock(s);
3287 		seq_putc(seq, '\n');
3288 	}
3289 
3290 	return 0;
3291 }
3292 
3293 static const struct seq_operations unix_seq_ops = {
3294 	.start  = unix_seq_start,
3295 	.next   = unix_seq_next,
3296 	.stop   = unix_seq_stop,
3297 	.show   = unix_seq_show,
3298 };
3299 
3300 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
3301 struct bpf_unix_iter_state {
3302 	struct seq_net_private p;
3303 	unsigned int cur_sk;
3304 	unsigned int end_sk;
3305 	unsigned int max_sk;
3306 	struct sock **batch;
3307 	bool st_bucket_done;
3308 };
3309 
3310 struct bpf_iter__unix {
3311 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3312 	__bpf_md_ptr(struct unix_sock *, unix_sk);
3313 	uid_t uid __aligned(8);
3314 };
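
/*
 * A matching iterator program lives in a BPF object, not in this file;
 * a minimal sketch modeled on the selftests' conventions (the section
 * name "iter/unix" and BPF_SEQ_PRINTF are assumptions taken from
 * there):
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 */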
3315 
3316 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3317 			      struct unix_sock *unix_sk, uid_t uid)
3318 {
3319 	struct bpf_iter__unix ctx;
3320 
3321 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3322 	ctx.meta = meta;
3323 	ctx.unix_sk = unix_sk;
3324 	ctx.uid = uid;
3325 	return bpf_iter_run_prog(prog, &ctx);
3326 }
3327 
3328 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3330 {
3331 	struct bpf_unix_iter_state *iter = seq->private;
3332 	unsigned int expected = 1;
3333 	struct sock *sk;
3334 
3335 	sock_hold(start_sk);
3336 	iter->batch[iter->end_sk++] = start_sk;
3337 
3338 	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3339 		if (iter->end_sk < iter->max_sk) {
3340 			sock_hold(sk);
3341 			iter->batch[iter->end_sk++] = sk;
3342 		}
3343 
3344 		expected++;
3345 	}
3346 
3347 	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3348 
3349 	return expected;
3350 }
3351 
3352 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3353 {
3354 	while (iter->cur_sk < iter->end_sk)
3355 		sock_put(iter->batch[iter->cur_sk++]);
3356 }
3357 
3358 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3359 				       unsigned int new_batch_sz)
3360 {
3361 	struct sock **new_batch;
3362 
3363 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3364 			     GFP_USER | __GFP_NOWARN);
3365 	if (!new_batch)
3366 		return -ENOMEM;
3367 
3368 	bpf_iter_unix_put_batch(iter);
3369 	kvfree(iter->batch);
3370 	iter->batch = new_batch;
3371 	iter->max_sk = new_batch_sz;
3372 
3373 	return 0;
3374 }
3375 
3376 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3377 					loff_t *pos)
3378 {
3379 	struct bpf_unix_iter_state *iter = seq->private;
3380 	unsigned int expected;
3381 	bool resized = false;
3382 	struct sock *sk;
3383 
3384 	if (iter->st_bucket_done)
3385 		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3386 
3387 again:
3388 	/* Get a new batch */
3389 	iter->cur_sk = 0;
3390 	iter->end_sk = 0;
3391 
3392 	sk = unix_get_first(seq, pos);
3393 	if (!sk)
3394 		return NULL; /* Done */
3395 
3396 	expected = bpf_iter_unix_hold_batch(seq, sk);
3397 
3398 	if (iter->end_sk == expected) {
3399 		iter->st_bucket_done = true;
3400 		return sk;
3401 	}
3402 
3403 	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3404 		resized = true;
3405 		goto again;
3406 	}
3407 
3408 	return sk;
3409 }
3410 
3411 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3412 {
3413 	if (!*pos)
3414 		return SEQ_START_TOKEN;
3415 
3416 	/* bpf iter does not support lseek, so it always
3417 	 * continues from where it was stop()-ped.
3418 	 */
3419 	return bpf_iter_unix_batch(seq, pos);
3420 }
3421 
3422 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3423 {
3424 	struct bpf_unix_iter_state *iter = seq->private;
3425 	struct sock *sk;
3426 
3427 	/* Whenever seq_next() is called, the sock at iter->cur_sk is
3428 	 * done with seq_show(), so advance to the next sk in
3429 	 * the batch.
3430 	 */
3431 	if (iter->cur_sk < iter->end_sk)
3432 		sock_put(iter->batch[iter->cur_sk++]);
3433 
3434 	++*pos;
3435 
3436 	if (iter->cur_sk < iter->end_sk)
3437 		sk = iter->batch[iter->cur_sk];
3438 	else
3439 		sk = bpf_iter_unix_batch(seq, pos);
3440 
3441 	return sk;
3442 }
3443 
3444 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3445 {
3446 	struct bpf_iter_meta meta;
3447 	struct bpf_prog *prog;
3448 	struct sock *sk = v;
3449 	uid_t uid;
3450 	bool slow;
3451 	int ret;
3452 
3453 	if (v == SEQ_START_TOKEN)
3454 		return 0;
3455 
3456 	slow = lock_sock_fast(sk);
3457 
3458 	if (unlikely(sk_unhashed(sk))) {
3459 		ret = SEQ_SKIP;
3460 		goto unlock;
3461 	}
3462 
3463 	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3464 	meta.seq = seq;
3465 	prog = bpf_iter_get_info(&meta, false);
3466 	ret = unix_prog_seq_show(prog, &meta, v, uid);
3467 unlock:
3468 	unlock_sock_fast(sk, slow);
3469 	return ret;
3470 }
3471 
3472 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3473 {
3474 	struct bpf_unix_iter_state *iter = seq->private;
3475 	struct bpf_iter_meta meta;
3476 	struct bpf_prog *prog;
3477 
3478 	if (!v) {
3479 		meta.seq = seq;
3480 		prog = bpf_iter_get_info(&meta, true);
3481 		if (prog)
3482 			(void)unix_prog_seq_show(prog, &meta, v, 0);
3483 	}
3484 
3485 	if (iter->cur_sk < iter->end_sk)
3486 		bpf_iter_unix_put_batch(iter);
3487 }
3488 
3489 static const struct seq_operations bpf_iter_unix_seq_ops = {
3490 	.start	= bpf_iter_unix_seq_start,
3491 	.next	= bpf_iter_unix_seq_next,
3492 	.stop	= bpf_iter_unix_seq_stop,
3493 	.show	= bpf_iter_unix_seq_show,
3494 };
3495 #endif
3496 #endif
3497 
3498 static const struct net_proto_family unix_family_ops = {
3499 	.family = PF_UNIX,
3500 	.create = unix_create,
3501 	.owner	= THIS_MODULE,
3502 };
3503 
3504 
3505 static int __net_init unix_net_init(struct net *net)
3506 {
3507 	int i;
3508 
3509 	net->unx.sysctl_max_dgram_qlen = 10;
3510 	if (unix_sysctl_register(net))
3511 		goto out;
3512 
3513 #ifdef CONFIG_PROC_FS
3514 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3515 			     sizeof(struct seq_net_private)))
3516 		goto err_sysctl;
3517 #endif
3518 
3519 	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3520 					      sizeof(spinlock_t), GFP_KERNEL);
3521 	if (!net->unx.table.locks)
3522 		goto err_proc;
3523 
3524 	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3525 						sizeof(struct hlist_head),
3526 						GFP_KERNEL);
3527 	if (!net->unx.table.buckets)
3528 		goto free_locks;
3529 
3530 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3531 		spin_lock_init(&net->unx.table.locks[i]);
3532 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3533 	}
3534 
3535 	return 0;
3536 
3537 free_locks:
3538 	kvfree(net->unx.table.locks);
3539 err_proc:
3540 #ifdef CONFIG_PROC_FS
3541 	remove_proc_entry("unix", net->proc_net);
3542 err_sysctl:
3543 #endif
3544 	unix_sysctl_unregister(net);
3545 out:
3546 	return -ENOMEM;
3547 }
3548 
3549 static void __net_exit unix_net_exit(struct net *net)
3550 {
3551 	kvfree(net->unx.table.buckets);
3552 	kvfree(net->unx.table.locks);
3553 	unix_sysctl_unregister(net);
3554 	remove_proc_entry("unix", net->proc_net);
3555 }
3556 
3557 static struct pernet_operations unix_net_ops = {
3558 	.init = unix_net_init,
3559 	.exit = unix_net_exit,
3560 };
3561 
3562 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3563 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3564 		     struct unix_sock *unix_sk, uid_t uid)
3565 
3566 #define INIT_BATCH_SZ 16
3567 
3568 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3569 {
3570 	struct bpf_unix_iter_state *iter = priv_data;
3571 	int err;
3572 
3573 	err = bpf_iter_init_seq_net(priv_data, aux);
3574 	if (err)
3575 		return err;
3576 
3577 	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3578 	if (err) {
3579 		bpf_iter_fini_seq_net(priv_data);
3580 		return err;
3581 	}
3582 
3583 	return 0;
3584 }
3585 
3586 static void bpf_iter_fini_unix(void *priv_data)
3587 {
3588 	struct bpf_unix_iter_state *iter = priv_data;
3589 
3590 	bpf_iter_fini_seq_net(priv_data);
3591 	kvfree(iter->batch);
3592 }
3593 
3594 static const struct bpf_iter_seq_info unix_seq_info = {
3595 	.seq_ops		= &bpf_iter_unix_seq_ops,
3596 	.init_seq_private	= bpf_iter_init_unix,
3597 	.fini_seq_private	= bpf_iter_fini_unix,
3598 	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3599 };
3600 
3601 static const struct bpf_func_proto *
3602 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3603 			     const struct bpf_prog *prog)
3604 {
3605 	switch (func_id) {
3606 	case BPF_FUNC_setsockopt:
3607 		return &bpf_sk_setsockopt_proto;
3608 	case BPF_FUNC_getsockopt:
3609 		return &bpf_sk_getsockopt_proto;
3610 	default:
3611 		return NULL;
3612 	}
3613 }
3614 
3615 static struct bpf_iter_reg unix_reg_info = {
3616 	.target			= "unix",
3617 	.ctx_arg_info_size	= 1,
3618 	.ctx_arg_info		= {
3619 		{ offsetof(struct bpf_iter__unix, unix_sk),
3620 		  PTR_TO_BTF_ID_OR_NULL },
3621 	},
3622 	.get_func_proto         = bpf_iter_unix_get_func_proto,
3623 	.seq_info		= &unix_seq_info,
3624 };
3625 
3626 static void __init bpf_iter_register(void)
3627 {
3628 	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3629 	if (bpf_iter_reg_target(&unix_reg_info))
3630 		pr_warn("Warning: could not register bpf iterator unix\n");
3631 }
3632 #endif
3633 
3634 static int __init af_unix_init(void)
3635 {
3636 	int i, rc = -1;
3637 
3638 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3639 
3640 	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3641 		spin_lock_init(&bsd_socket_locks[i]);
3642 		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3643 	}
3644 
3645 	rc = proto_register(&unix_dgram_proto, 1);
3646 	if (rc != 0) {
3647 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3648 		goto out;
3649 	}
3650 
3651 	rc = proto_register(&unix_stream_proto, 1);
3652 	if (rc != 0) {
3653 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3654 		proto_unregister(&unix_dgram_proto);
3655 		goto out;
3656 	}
3657 
3658 	sock_register(&unix_family_ops);
3659 	register_pernet_subsys(&unix_net_ops);
3660 	unix_bpf_build_proto();
3661 
3662 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3663 	bpf_iter_register();
3664 #endif
3665 
3666 out:
3667 	return rc;
3668 }
3669 
3670 static void __exit af_unix_exit(void)
3671 {
3672 	sock_unregister(PF_UNIX);
3673 	proto_unregister(&unix_dgram_proto);
3674 	proto_unregister(&unix_stream_proto);
3675 	unregister_pernet_subsys(&unix_net_ops);
3676 }
3677 
3678 /* Earlier than device_initcall() so that other drivers invoking
3679  * request_module() don't end up in a loop when modprobe tries
3680  * to use a UNIX socket. But later than subsys_initcall() because
3681  * we depend on stuff initialised there. */
3682 fs_initcall(af_unix_init);
3683 module_exit(af_unix_exit);
3684 
3685 MODULE_LICENSE("GPL");
3686 MODULE_ALIAS_NETPROTO(PF_UNIX);
3687