// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *		Alexey Kuznetsov:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids a huge number
 *					of hashed socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov:	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with a zero byte, so that this name space does not
 *		  intersect with BSD names.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/splice.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>

#include "scm.h"

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 *    hash table is protected with spinlock.
 *    each socket state is protected by separate spinlock.
 */

static unsigned int unix_unbound_hash(struct sock *sk)
{
        unsigned long hash = (unsigned long)sk;

        hash ^= hash >> 16;
        hash ^= hash >> 8;
        hash ^= sk->sk_type;

        return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
        return i->i_ino & UNIX_HASH_MOD;
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
                                       int addr_len, int type)
{
        __wsum csum = csum_partial(sunaddr, addr_len, 0);
        unsigned int hash;

        hash = (__force unsigned int)csum_fold(csum);
        hash ^= hash >> 8;
        hash ^= type;

        return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}
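
/* Note: the unbound and BSD hashes above fall in [0, UNIX_HASH_MOD], while
 * the abstract hash is offset into [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1].
 * Unbound/pathname sockets and abstract sockets therefore never share a
 * bucket in the per-netns table.
 */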

static void unix_table_double_lock(struct net *net,
                                   unsigned int hash1, unsigned int hash2)
{
        if (hash1 == hash2) {
                spin_lock(&net->unx.table.locks[hash1]);
                return;
        }

        if (hash1 > hash2)
                swap(hash1, hash2);

        spin_lock(&net->unx.table.locks[hash1]);
        spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
}

static void unix_table_double_unlock(struct net *net,
                                     unsigned int hash1, unsigned int hash2)
{
        if (hash1 == hash2) {
                spin_unlock(&net->unx.table.locks[hash1]);
                return;
        }

        spin_unlock(&net->unx.table.locks[hash1]);
        spin_unlock(&net->unx.table.locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
        return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
        struct sock *peer;

        unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
        unix_state_unlock(s);
        return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
                                             int addr_len)
{
        struct unix_address *addr;

        addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
        if (!addr)
                return NULL;

        refcount_set(&addr->refcnt, 1);
        addr->len = addr_len;
        memcpy(addr->name, sunaddr, addr_len);

        return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
        if (refcount_dec_and_test(&addr->refcnt))
                kfree(addr);
}

/*
 * Check unix socket name:
 *	- it must not be zero length.
 *	- if it does not start with a zero byte, it must be NUL terminated
 *	  (a filesystem object).
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
        if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
            addr_len > sizeof(*sunaddr))
                return -EINVAL;

        if (sunaddr->sun_family != AF_UNIX)
                return -EINVAL;

        return 0;
}
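
/* Illustration (userspace, not kernel code): the two name spaces that
 * unix_validate_addr() distinguishes look like this. A minimal sketch,
 * error handling omitted; the two binds are alternatives, a socket can
 * only be bound once.
 *
 *	struct sockaddr_un un = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	// Filesystem name: NUL-terminated path, visible via the VFS.
 *	strcpy(un.sun_path, "/tmp/example.sock");
 *	bind(fd, (struct sockaddr *)&un, sizeof(un));
 *
 *	// Abstract name: leading zero byte; the address length, not a
 *	// terminator, delimits the name.
 *	un.sun_path[0] = '\0';
 *	memcpy(un.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&un,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */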

static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
        struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
        short offset = offsetof(struct sockaddr_storage, __data);

        BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));

        /* This may look like an off by one error but it is a bit more
         * subtle. 108 is the longest valid AF_UNIX path for a binding.
         * sun_path[108] doesn't as such exist. However in kernel space
         * we are guaranteed that it is a valid memory location in our
         * kernel address buffer because syscall functions always pass
         * a pointer of struct sockaddr_storage which has a bigger buffer
         * than 108. Also, we must terminate sun_path for strlen() in
         * getname_kernel().
         */
        addr->__data[addr_len - offset] = 0;

        /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
         * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
         * know the actual buffer.
         */
        return strlen(addr->__data) + offset + 1;
}
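
/* Worked example: for a bind to "/tmp/x" (strlen 6), whether userspace
 * passed addr_len with or without the trailing NUL, the function above
 * terminates the path and returns
 * offsetof(struct sockaddr_un, sun_path) + 6 + 1, i.e. the address
 * length normalized to include exactly one trailing NUL.
 */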

static void __unix_remove_socket(struct sock *sk)
{
        sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
        DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
        sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
                                 struct unix_address *addr, unsigned int hash)
{
        __unix_remove_socket(sk);
        smp_store_release(&unix_sk(sk)->addr, addr);

        sk->sk_hash = hash;
        __unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
        spin_lock(&net->unx.table.locks[sk->sk_hash]);
        __unix_remove_socket(sk);
        spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
        spin_lock(&net->unx.table.locks[sk->sk_hash]);
        __unix_insert_socket(net, sk);
        spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
        spin_lock(&bsd_socket_locks[sk->sk_hash]);
        sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
        spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
        if (!hlist_unhashed(&sk->sk_bind_node)) {
                spin_lock(&bsd_socket_locks[sk->sk_hash]);
                __sk_del_bind_node(sk);
                spin_unlock(&bsd_socket_locks[sk->sk_hash]);

                sk_node_init(&sk->sk_bind_node);
        }
}

static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
                                              int len, unsigned int hash)
{
        struct sock *s;

        sk_for_each(s, &net->unx.table.buckets[hash]) {
                struct unix_sock *u = unix_sk(s);

                if (u->addr->len == len &&
                    !memcmp(u->addr->name, sunname, len))
                        return s;
        }
        return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
                                                   int len, unsigned int hash)
{
        struct sock *s;

        spin_lock(&net->unx.table.locks[hash]);
        s = __unix_find_socket_byname(net, sunname, len, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&net->unx.table.locks[hash]);
        return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
        unsigned int hash = unix_bsd_hash(i);
        struct sock *s;

        spin_lock(&bsd_socket_locks[hash]);
        sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;

                if (dentry && d_backing_inode(dentry) == i) {
                        sock_hold(s);
                        spin_unlock(&bsd_socket_locks[hash]);
                        return s;
                }
        }
        spin_unlock(&bsd_socket_locks[hash]);
        return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (e.g., /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition, and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */
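
/* Concretely, a sketch of the scenario described above: a client does
 *
 *	connect(c, ... "/dev/log" ...);	// server never connects back
 *	poll(&(struct pollfd){ .fd = c, .events = POLLOUT }, 1, -1);
 *
 * If the server's receive queue is full, unix_dgram_poll() parks the
 * client's peer_wake entry on the server's peer_wait queue via
 * unix_dgram_peer_wake_me(). When the server later dequeues a datagram,
 * its recv path wakes peer_wait, unix_dgram_peer_wake_relay() below
 * fires, and the client's poll() finally reports writability.
 */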

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
                                      void *key)
{
        struct unix_sock *u;
        wait_queue_head_t *u_sleep;

        u = container_of(q, struct unix_sock, peer_wake);

        __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
                            q);
        u->peer_wake.private = NULL;

        /* relaying can only happen while the wq still exists */
        u_sleep = sk_sleep(&u->sk);
        if (u_sleep)
                wake_up_interruptible_poll(u_sleep, key_to_poll(key));

        return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
        struct unix_sock *u, *u_other;
        int rc;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        rc = 0;
        spin_lock(&u_other->peer_wait.lock);

        if (!u->peer_wake.private) {
                u->peer_wake.private = other;
                __add_wait_queue(&u_other->peer_wait, &u->peer_wake);

                rc = 1;
        }

        spin_unlock(&u_other->peer_wait.lock);
        return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
                                            struct sock *other)
{
        struct unix_sock *u, *u_other;

        u = unix_sk(sk);
        u_other = unix_sk(other);
        spin_lock(&u_other->peer_wait.lock);

        if (u->peer_wake.private == other) {
                __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
                u->peer_wake.private = NULL;
        }

        spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
                                                   struct sock *other)
{
        unix_dgram_peer_wake_disconnect(sk, other);
        wake_up_interruptible_poll(sk_sleep(sk),
                                   EPOLLOUT |
                                   EPOLLWRNORM |
                                   EPOLLWRBAND);
}
/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
        int connected;

        connected = unix_dgram_peer_wake_connect(sk, other);

        /* If other is SOCK_DEAD, we want to make sure we signal
         * POLLOUT, such that a subsequent write() can get a
         * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
         * to other and it's full, we will hang waiting for POLLOUT.
         */
        if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
                return 1;

        if (connected)
                unix_dgram_peer_wake_disconnect(sk, other);

        return 0;
}

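/* A socket counts as writable while it is not listening and no more than
 * a quarter of its send buffer is consumed by queued write memory.
 */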
static int unix_writable(const struct sock *sk, unsigned char state)
{
        return state != TCP_LISTEN &&
               (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}

static void unix_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
                                EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

                /* If one link of a bidirectional dgram pipe is disconnected,
                 * we signal an error. Messages are lost. Do not do this
                 * when the peer was not connected to us.
                 */
                if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
                        WRITE_ONCE(other->sk_err, ECONNRESET);
                        sk_error_report(other);
                }
        }
}

static void unix_sock_destructor(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        skb_queue_purge(&sk->sk_receive_queue);

        DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
        DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
        DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive unix socket: %p\n", sk);
                return;
        }

        if (u->addr)
                unix_release_addr(u->addr);

        atomic_long_dec(&unix_nr_socks);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
        pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
                 atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
        struct unix_sock *u = unix_sk(sk);
        struct sock *skpair;
        struct sk_buff *skb;
        struct path path;
        int state;

        unix_remove_socket(sock_net(sk), sk);
        unix_remove_bsd_socket(sk);

        /* Clear state */
        unix_state_lock(sk);
        sock_orphan(sk);
        WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
        path = u->path;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
        WRITE_ONCE(sk->sk_state, TCP_CLOSE);

        skpair = unix_peer(sk);
        unix_peer(sk) = NULL;

        unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
        if (u->oob_skb) {
                kfree_skb(u->oob_skb);
                u->oob_skb = NULL;
        }
#endif

        wake_up_interruptible_all(&u->peer_wait);

        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
                        /* No more writes */
                        WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
                        if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
                                WRITE_ONCE(skpair->sk_err, ECONNRESET);
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }

                unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
        }

        /* Try to flush out this socket. Throw out buffers at least */

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (state == TCP_LISTEN)
                        unix_release_sock(skb->sk, 1);
                /* passed fds are erased in the kfree_skb hook */
                UNIXCB(skb).consumed = skb->len;
                kfree_skb(skb);
        }

        if (path.dentry)
                path_put(&path);

        sock_put(sk);

        /* ---- Socket is dead now and most probably destroyed ---- */

        /*
         * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
         *
         * Can't we simply set sock->err?
         *
         * What the above comment does talk about? --ANK(980817)
         */

        if (READ_ONCE(unix_tot_inflight))
                unix_gc();	/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
        const struct cred *old_cred;
        struct pid *old_pid;

        spin_lock(&sk->sk_peer_lock);
        old_pid = sk->sk_peer_pid;
        old_cred = sk->sk_peer_cred;
        sk->sk_peer_pid = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
        spin_unlock(&sk->sk_peer_lock);

        put_pid(old_pid);
        put_cred(old_cred);
}

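/* Take both peer locks in pointer order so that two concurrent
 * copy_peercred() calls on the same pair of sockets cannot deadlock
 * (classic ABBA avoidance).
 */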
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
        if (sk < peersk) {
                spin_lock(&sk->sk_peer_lock);
                spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&peersk->sk_peer_lock);
                spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
        }

        sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

        spin_unlock(&sk->sk_peer_lock);
        spin_unlock(&peersk->sk_peer_lock);
}

static int unix_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;	/* Only stream/seqpacket sockets accept */
        err = -EINVAL;
        if (!READ_ONCE(u->addr))
                goto out;	/* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog = backlog;
        WRITE_ONCE(sk->sk_state, TCP_LISTEN);

        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;

out_unlock:
        unix_state_unlock(sk);
out:
        return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
                                poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
                                       struct pipe_inode_info *, size_t size,
                                       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
                                  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
        struct unix_sock *u = unix_sk(sk);

        if (mutex_lock_interruptible(&u->iolock))
                return -EINTR;

        WRITE_ONCE(sk->sk_peek_off, val);
        mutex_unlock(&u->iolock);

        return 0;
}

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
        struct sk_buff *skb;
        struct unix_sock *u;
        int nr_fds = 0;

        spin_lock(&sk->sk_receive_queue.lock);
        skb = skb_peek(&sk->sk_receive_queue);
        while (skb) {
                u = unix_sk(skb->sk);
                nr_fds += atomic_read(&u->scm_stat.nr_fds);
                skb = skb_peek_next(skb, &sk->sk_receive_queue);
        }
        spin_unlock(&sk->sk_receive_queue.lock);

        return nr_fds;
}

static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
        struct sock *sk = sock->sk;
        unsigned char s_state;
        struct unix_sock *u;
        int nr_fds = 0;

        if (sk) {
                s_state = READ_ONCE(sk->sk_state);
                u = unix_sk(sk);

                /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
                 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
                 * SOCK_DGRAM is ordinary. So, no lock is needed.
                 */
                if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
                        nr_fds = atomic_read(&u->scm_stat.nr_fds);
                else if (s_state == TCP_LISTEN)
                        nr_fds = unix_count_nr_fds(sk);

                seq_printf(m, "scm_fds: %u\n", nr_fds);
        }
}
#else
#define unix_show_fdinfo NULL
#endif

static const struct proto_ops unix_stream_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_stream_connect,
        .socketpair = unix_socketpair,
        .accept = unix_accept,
        .getname = unix_getname,
        .poll = unix_poll,
        .ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = unix_compat_ioctl,
#endif
        .listen = unix_listen,
        .shutdown = unix_shutdown,
        .sendmsg = unix_stream_sendmsg,
        .recvmsg = unix_stream_recvmsg,
        .read_skb = unix_stream_read_skb,
        .mmap = sock_no_mmap,
        .splice_read = unix_stream_splice_read,
        .set_peek_off = unix_set_peek_off,
        .show_fdinfo = unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_dgram_connect,
        .socketpair = unix_socketpair,
        .accept = sock_no_accept,
        .getname = unix_getname,
        .poll = unix_dgram_poll,
        .ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = unix_compat_ioctl,
#endif
        .listen = sock_no_listen,
        .shutdown = unix_shutdown,
        .sendmsg = unix_dgram_sendmsg,
        .read_skb = unix_read_skb,
        .recvmsg = unix_dgram_recvmsg,
        .mmap = sock_no_mmap,
        .set_peek_off = unix_set_peek_off,
        .show_fdinfo = unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = unix_release,
        .bind = unix_bind,
        .connect = unix_stream_connect,
        .socketpair = unix_socketpair,
        .accept = unix_accept,
        .getname = unix_getname,
        .poll = unix_dgram_poll,
        .ioctl = unix_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = unix_compat_ioctl,
#endif
        .listen = unix_listen,
        .shutdown = unix_shutdown,
        .sendmsg = unix_seqpacket_sendmsg,
        .recvmsg = unix_seqpacket_recvmsg,
        .mmap = sock_no_mmap,
        .set_peek_off = unix_set_peek_off,
        .show_fdinfo = unix_show_fdinfo,
};

static void unix_close(struct sock *sk, long timeout)
{
        /* Nothing to do here, unix socket does not need a ->close().
         * This is merely for sockmap.
         */
}

static void unix_unhash(struct sock *sk)
{
        /* Nothing to do here, unix socket does not need a ->unhash().
         * This is merely for sockmap.
         */
}

static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
        if (level == SOL_SOCKET) {
                switch (optname) {
                case SO_PEERPIDFD:
                        return true;
                default:
                        return false;
                }
        }

        return false;
}

struct proto unix_dgram_proto = {
        .name = "UNIX",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct unix_sock),
        .close = unix_close,
        .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
        .psock_update_sk_prot = unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
        .name = "UNIX-STREAM",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct unix_sock),
        .close = unix_close,
        .unhash = unix_unhash,
        .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
        .psock_update_sk_prot = unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
        struct unix_sock *u;
        struct sock *sk;
        int err;

        atomic_long_inc(&unix_nr_socks);
        if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
                err = -ENFILE;
                goto err;
        }

        if (type == SOCK_STREAM)
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
        else /* dgram and seqpacket */
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

        if (!sk) {
                err = -ENOMEM;
                goto err;
        }

        sock_init_data(sock, sk);

        sk->sk_hash = unix_unbound_hash(sk);
        sk->sk_allocation = GFP_KERNEL_ACCOUNT;
        sk->sk_write_space = unix_write_space;
        sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
        sk->sk_destruct = unix_sock_destructor;
        u = unix_sk(sk);
        u->inflight = 0;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->iolock); /* single task reading lock */
        mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        memset(&u->scm_stat, 0, sizeof(struct scm_stat));
        unix_insert_unbound_socket(net, sk);

        sock_prot_inuse_add(net, sk->sk_prot, 1);

        return sk;

err:
        atomic_long_dec(&unix_nr_socks);
        return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        struct sock *sk;

        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &unix_stream_ops;
                break;
        /*
         * Believe it or not BSD has AF_UNIX, SOCK_RAW though
         * nothing uses it.
         */
        case SOCK_RAW:
                sock->type = SOCK_DGRAM;
                fallthrough;
        case SOCK_DGRAM:
                sock->ops = &unix_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &unix_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sk = unix_create1(net, sock, kern, sock->type);
        if (IS_ERR(sk))
                return PTR_ERR(sk);

        return 0;
}

static int unix_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        sk->sk_prot->close(sk, 0);
        unix_release_sock(sk, 0);
        sock->sk = NULL;

        return 0;
}

static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
                                  int type)
{
        struct inode *inode;
        struct path path;
        struct sock *sk;
        int err;

        unix_mkname_bsd(sunaddr, addr_len);
        err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
        if (err)
                goto fail;

        err = path_permission(&path, MAY_WRITE);
        if (err)
                goto path_put;

        err = -ECONNREFUSED;
        inode = d_backing_inode(path.dentry);
        if (!S_ISSOCK(inode->i_mode))
                goto path_put;

        sk = unix_find_socket_byinode(inode);
        if (!sk)
                goto path_put;

        err = -EPROTOTYPE;
        if (sk->sk_type == type)
                touch_atime(&path);
        else
                goto sock_put;

        path_put(&path);

        return sk;

sock_put:
        sock_put(sk);
path_put:
        path_put(&path);
fail:
        return ERR_PTR(err);
}

static struct sock *unix_find_abstract(struct net *net,
                                       struct sockaddr_un *sunaddr,
                                       int addr_len, int type)
{
        unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
        struct dentry *dentry;
        struct sock *sk;

        sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
        if (!sk)
                return ERR_PTR(-ECONNREFUSED);

        dentry = unix_sk(sk)->path.dentry;
        if (dentry)
                touch_atime(&unix_sk(sk)->path);

        return sk;
}

static struct sock *unix_find_other(struct net *net,
                                    struct sockaddr_un *sunaddr,
                                    int addr_len, int type)
{
        struct sock *sk;

        if (sunaddr->sun_path[0])
                sk = unix_find_bsd(sunaddr, addr_len, type);
        else
                sk = unix_find_abstract(net, sunaddr, addr_len, type);

        return sk;
}

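/* Autobind assigns an abstract address of the form "\0xxxxx": five
 * lowercase hex digits after the leading zero byte. It retries with
 * consecutive numbers until an unused name is found, or gives up with
 * -ENOSPC once the whole 2^20 name space has been tried.
 */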
static int unix_autobind(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);
        unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct unix_address *addr;
        u32 lastnum, ordernum;
        int err;

        err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                return err;

        if (u->addr)
                goto out;

        err = -ENOMEM;
        addr = kzalloc(sizeof(*addr) +
                       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
        if (!addr)
                goto out;

        addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
        addr->name->sun_family = AF_UNIX;
        refcount_set(&addr->refcnt, 1);

        old_hash = sk->sk_hash;
        ordernum = get_random_u32();
        lastnum = ordernum & 0xFFFFF;
retry:
        ordernum = (ordernum + 1) & 0xFFFFF;
        sprintf(addr->name->sun_path + 1, "%05x", ordernum);

        new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
        unix_table_double_lock(net, old_hash, new_hash);

        if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
                unix_table_double_unlock(net, old_hash, new_hash);

                /* __unix_find_socket_byname() may take a long time if many
                 * names are already in use.
                 */
                cond_resched();

                if (ordernum == lastnum) {
                        /* Give up if all names seem to be in use. */
                        err = -ENOSPC;
                        unix_release_addr(addr);
                        goto out;
                }

                goto retry;
        }

        __unix_set_addr_hash(net, sk, addr, new_hash);
        unix_table_double_unlock(net, old_hash, new_hash);
        err = 0;

out:    mutex_unlock(&u->bindlock);
        return err;
}

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
                         int addr_len)
{
        umode_t mode = S_IFSOCK |
               (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
        struct unix_sock *u = unix_sk(sk);
        unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct mnt_idmap *idmap;
        struct unix_address *addr;
        struct dentry *dentry;
        struct path parent;
        int err;

        addr_len = unix_mkname_bsd(sunaddr, addr_len);
        addr = unix_create_addr(sunaddr, addr_len);
        if (!addr)
                return -ENOMEM;

        /*
         * Get the parent directory, calculate the hash for last
         * component.
         */
        dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out;
        }

        /*
         * All right, let's create it.
         */
        idmap = mnt_idmap(parent.mnt);
        err = security_path_mknod(&parent, dentry, mode, 0);
        if (!err)
                err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
        if (err)
                goto out_path;
        err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                goto out_unlink;
        if (u->addr)
                goto out_unlock;

        old_hash = sk->sk_hash;
        new_hash = unix_bsd_hash(d_backing_inode(dentry));
        unix_table_double_lock(net, old_hash, new_hash);
        u->path.mnt = mntget(parent.mnt);
        u->path.dentry = dget(dentry);
        __unix_set_addr_hash(net, sk, addr, new_hash);
        unix_table_double_unlock(net, old_hash, new_hash);
        unix_insert_bsd_socket(sk);
        mutex_unlock(&u->bindlock);
        done_path_create(&parent, dentry);
        return 0;

out_unlock:
        mutex_unlock(&u->bindlock);
        err = -EINVAL;
out_unlink:
        /* failed after successful mknod? unlink what we'd created... */
        vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
        done_path_create(&parent, dentry);
out:
        unix_release_addr(addr);
        return err == -EEXIST ? -EADDRINUSE : err;
}

static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
                              int addr_len)
{
        struct unix_sock *u = unix_sk(sk);
        unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct unix_address *addr;
        int err;

        addr = unix_create_addr(sunaddr, addr_len);
        if (!addr)
                return -ENOMEM;

        err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                goto out;

        if (u->addr) {
                err = -EINVAL;
                goto out_mutex;
        }

        old_hash = sk->sk_hash;
        new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
        unix_table_double_lock(net, old_hash, new_hash);

        if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
                goto out_spin;

        __unix_set_addr_hash(net, sk, addr, new_hash);
        unix_table_double_unlock(net, old_hash, new_hash);
        mutex_unlock(&u->bindlock);
        return 0;

out_spin:
        unix_table_double_unlock(net, old_hash, new_hash);
        err = -EADDRINUSE;
out_mutex:
        mutex_unlock(&u->bindlock);
out:
        unix_release_addr(addr);
        return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct sock *sk = sock->sk;
        int err;

        if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
            sunaddr->sun_family == AF_UNIX)
                return unix_autobind(sk);

        err = unix_validate_addr(sunaddr, addr_len);
        if (err)
                return err;

        if (sunaddr->sun_path[0])
                err = unix_bind_bsd(sk, sunaddr, addr_len);
        else
                err = unix_bind_abstract(sk, sunaddr, addr_len);

        return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_lock(sk1);
                return;
        }
        if (sk1 > sk2)
                swap(sk1, sk2);

        unix_state_lock(sk1);
        unix_state_lock_nested(sk2, U_LOCK_SECOND);
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
        if (unlikely(sk1 == sk2) || !sk2) {
                unix_state_unlock(sk1);
                return;
        }
        unix_state_unlock(sk1);
        unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                              int alen, int flags)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
        struct sock *sk = sock->sk;
        struct sock *other;
        int err;

        err = -EINVAL;
        if (alen < offsetofend(struct sockaddr, sa_family))
                goto out;

        if (addr->sa_family != AF_UNSPEC) {
                err = unix_validate_addr(sunaddr, alen);
                if (err)
                        goto out;

                if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
                     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
                    !READ_ONCE(unix_sk(sk)->addr)) {
                        err = unix_autobind(sk);
                        if (err)
                                goto out;
                }

restart:
                other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
                if (IS_ERR(other)) {
                        err = PTR_ERR(other);
                        goto out;
                }

                unix_state_double_lock(sk, other);

                /* Apparently VFS overslept socket death. Retry. */
                if (sock_flag(other, SOCK_DEAD)) {
                        unix_state_double_unlock(sk, other);
                        sock_put(other);
                        goto restart;
                }

                err = -EPERM;
                if (!unix_may_send(sk, other))
                        goto out_unlock;

                err = security_unix_may_send(sk->sk_socket, other->sk_socket);
                if (err)
                        goto out_unlock;

                WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
                WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
        } else {
                /*
                 * 1003.1g breaking connected state with AF_UNSPEC
                 */
                other = NULL;
                unix_state_double_lock(sk, other);
        }

        /*
         * If it was connected, reconnect.
         */
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);

                unix_peer(sk) = other;
                if (!other)
                        WRITE_ONCE(sk->sk_state, TCP_CLOSE);
                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

                unix_state_double_unlock(sk, other);

                if (other != old_peer) {
                        unix_dgram_disconnected(sk, old_peer);

                        unix_state_lock(old_peer);
                        if (!unix_peer(old_peer))
                                WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
                        unix_state_unlock(old_peer);
                }

                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
                unix_state_double_unlock(sk, other);
        }

        return 0;

out_unlock:
        unix_state_double_unlock(sk, other);
        sock_put(other);
out:
        return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
        __releases(&unix_sk(other)->lock)
{
        struct unix_sock *u = unix_sk(other);
        int sched;
        DEFINE_WAIT(wait);

        prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

        sched = !sock_flag(other, SOCK_DEAD) &&
                !(other->sk_shutdown & RCV_SHUTDOWN) &&
                unix_recvq_full_lockless(other);

        unix_state_unlock(other);

        if (sched)
                timeo = schedule_timeout(timeo);

        finish_wait(&u->peer_wait, &wait);
        return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                               int addr_len, int flags)
{
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
        struct unix_sock *u = unix_sk(sk), *newu, *otheru;
        struct net *net = sock_net(sk);
        struct sk_buff *skb = NULL;
        unsigned char state;
        long timeo;
        int err;

        err = unix_validate_addr(sunaddr, addr_len);
        if (err)
                goto out;

        if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
             test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
            !READ_ONCE(u->addr)) {
                err = unix_autobind(sk);
                if (err)
                        goto out;
        }

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        /* First of all, allocate resources. If we do this after the state
         * is locked, we will have to recheck everything again in any case.
         */

        /* create new sock for complete connection */
        newsk = unix_create1(net, NULL, 0, sock->type);
        if (IS_ERR(newsk)) {
                err = PTR_ERR(newsk);
                newsk = NULL;
                goto out;
        }

        err = -ENOMEM;

        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
        if (skb == NULL)
                goto out;

restart:
        /* Find listening sock. */
        other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
        if (IS_ERR(other)) {
                err = PTR_ERR(other);
                other = NULL;
                goto out;
        }

        unix_state_lock(other);

        /* Apparently VFS overslept socket death. Retry. */
        if (sock_flag(other, SOCK_DEAD)) {
                unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }

        err = -ECONNREFUSED;
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;

        if (unix_recvq_full_lockless(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;

                timeo = unix_wait_for_peer(other, timeo);

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
                sock_put(other);
                goto restart;
        }

        /* self connect and simultaneous connect are eliminated
         * by rejecting a TCP_LISTEN socket to avoid deadlock.
         */
        state = READ_ONCE(sk->sk_state);
        if (unlikely(state != TCP_CLOSE)) {
                err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
                goto out_unlock;
        }

        unix_state_lock_nested(sk, U_LOCK_SECOND);

        if (unlikely(sk->sk_state != TCP_CLOSE)) {
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
                unix_state_unlock(sk);
                goto out_unlock;
        }

        err = security_unix_stream_connect(sk, other, newsk);
        if (err) {
                unix_state_unlock(sk);
                goto out_unlock;
        }

        /* The way is open! Quickly set all the necessary fields... */

        sock_hold(sk);
        unix_peer(newsk) = sk;
        newsk->sk_state = TCP_ESTABLISHED;
        newsk->sk_type = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
        RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);

        /* copy address information from listening to new sock
         *
         * The contents of *(otheru->addr) and otheru->path
         * are seen fully set up here, since we have found
         * otheru in hash under its lock. Insertion into the
         * hash chain we'd found it in had been done in an
         * earlier critical area protected by the chain's lock,
         * the same one where we'd set *(otheru->addr) contents,
         * as well as otheru->path and otheru->addr itself.
         *
         * Using smp_store_release() here to set newu->addr
         * is enough to make those stores, as well as stores
         * to newu->path visible to anyone who gets newu->addr
         * by smp_load_acquire(). IOW, the same guarantees
         * as for unix_sock instances bound in unix_bind() or
         * in unix_autobind().
         */
        if (otheru->path.dentry) {
                path_get(&otheru->path);
                newu->path = otheru->path;
        }
        refcount_inc(&otheru->addr->refcnt);
        smp_store_release(&newu->addr, otheru->addr);

        /* Set credentials */
        copy_peercred(sk, other);

        sock->state = SS_CONNECTED;
        WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
        sock_hold(newsk);

        smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
        unix_peer(sk) = newsk;

        unix_state_unlock(sk);

        /* take ten and send info to listening sock */
        spin_lock(&other->sk_receive_queue.lock);
        __skb_queue_tail(&other->sk_receive_queue, skb);
        spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
        return 0;

out_unlock:
        if (other)
                unix_state_unlock(other);

out:
        kfree_skb(skb);
        if (newsk)
                unix_release_sock(newsk, 0);
        if (other)
                sock_put(other);
        return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
        struct sock *ska = socka->sk, *skb = sockb->sk;

        /* Join our sockets back to back */
        sock_hold(ska);
        sock_hold(skb);
        unix_peer(ska) = skb;
        unix_peer(skb) = ska;
        init_peercred(ska);
        init_peercred(skb);

        ska->sk_state = TCP_ESTABLISHED;
        skb->sk_state = TCP_ESTABLISHED;
        socka->state = SS_CONNECTED;
        sockb->state = SS_CONNECTED;
        return 0;
}

static void unix_sock_inherit_flags(const struct socket *old,
                                    struct socket *new)
{
        if (test_bit(SOCK_PASSCRED, &old->flags))
                set_bit(SOCK_PASSCRED, &new->flags);
        if (test_bit(SOCK_PASSPIDFD, &old->flags))
                set_bit(SOCK_PASSPIDFD, &new->flags);
        if (test_bit(SOCK_PASSSEC, &old->flags))
                set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
                       bool kern)
{
        struct sock *sk = sock->sk;
        struct sock *tsk;
        struct sk_buff *skb;
        int err;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;

        err = -EINVAL;
        if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
                goto out;

        /* If socket state is TCP_LISTEN it cannot change (for now...),
         * so that no locks are necessary.
         */

        skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
                                &err);
        if (!skb) {
                /* This means receive shutdown. */
                if (err == 0)
                        err = -EINVAL;
                goto out;
        }

        tsk = skb->sk;
        skb_free_datagram(sk, skb);
        wake_up_interruptible(&unix_sk(sk)->peer_wait);

        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;

out:
        return err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
        struct sock *sk = sock->sk;
        struct unix_address *addr;
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
        int err = 0;

        if (peer) {
                sk = unix_peer_get(sk);

                err = -ENOTCONN;
                if (!sk)
                        goto out;
                err = 0;
        } else {
                sock_hold(sk);
        }

        addr = smp_load_acquire(&unix_sk(sk)->addr);
        if (!addr) {
                sunaddr->sun_family = AF_UNIX;
                sunaddr->sun_path[0] = 0;
                err = offsetof(struct sockaddr_un, sun_path);
        } else {
                err = addr->len;
                memcpy(sunaddr, addr->name, addr->len);
        }
        sock_put(sk);
out:
        return err;
}

static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->fp = scm_fp_dup(UNIXCB(skb).fp);

        /*
         * Garbage collection of unix sockets starts by selecting a set of
         * candidate sockets which have reference only from being in flight
         * (total_refs == inflight_refs). This condition is checked once during
         * the candidate collection phase, and candidates are marked as such, so
         * that non-candidates can later be ignored. While inflight_refs is
         * protected by unix_gc_lock, total_refs (file count) is not, hence this
         * is an instantaneous decision.
         *
         * Once a candidate, however, the socket must not be reinstalled into a
         * file descriptor while the garbage collection is in progress.
         *
         * If the above conditions are met, then the directed graph of
         * candidates (*) does not change while unix_gc_lock is held.
         *
         * Any operation that changes the file count through file descriptors
         * (dup, close, sendmsg) does not change the graph since candidates are
         * not installed in fds.
         *
         * Dequeueing a candidate via recvmsg would install it into an fd, but
         * that takes unix_gc_lock to decrement the inflight count, so it's
         * serialized with garbage collection.
         *
         * MSG_PEEK is special in that it does not change the inflight count,
         * yet does install the socket into an fd. The following lock/unlock
         * pair is to ensure serialization with garbage collection. It must be
         * done between incrementing the file count and installing the file into
         * an fd.
         *
         * If garbage collection starts after the barrier provided by the
         * lock/unlock, then it will see the elevated refcount and not mark this
         * as a candidate. If a garbage collection is already in progress
         * before the file count was incremented, then the lock/unlock pair will
         * ensure that garbage collection is finished before progressing to
         * installing the fd.
         *
         * (*) A -> B where B is on the queue of A or B is on the queue of C
         * which is on the queue of listening socket A.
         */
        spin_lock(&unix_gc_lock);
        spin_unlock(&unix_gc_lock);
}
1801
unix_scm_to_skb(struct scm_cookie * scm,struct sk_buff * skb,bool send_fds)1802 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1803 {
1804 int err = 0;
1805
1806 UNIXCB(skb).pid = get_pid(scm->pid);
1807 UNIXCB(skb).uid = scm->creds.uid;
1808 UNIXCB(skb).gid = scm->creds.gid;
1809 UNIXCB(skb).fp = NULL;
1810 unix_get_secdata(scm, skb);
1811 if (scm->fp && send_fds)
1812 err = unix_attach_fds(scm, skb);
1813
1814 skb->destructor = unix_destruct_scm;
1815 return err;
1816 }
1817
unix_passcred_enabled(const struct socket * sock,const struct sock * other)1818 static bool unix_passcred_enabled(const struct socket *sock,
1819 const struct sock *other)
1820 {
1821 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1822 test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1823 !other->sk_socket ||
1824 test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1825 test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
1826 }
1827
1828 /*
1829 * Some apps rely on write() giving SCM_CREDENTIALS
1830 * We include credentials if source or destination socket
1831 * asserted SOCK_PASSCRED.
1832 */
1833 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1834 const struct sock *other)
1835 {
1836 if (UNIXCB(skb).pid)
1837 return;
1838 if (unix_passcred_enabled(sock, other)) {
1839 UNIXCB(skb).pid = get_pid(task_tgid(current));
1840 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1841 }
1842 }
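
/*
 * The peer only sees these credentials after opting in; a hedged
 * userspace sketch of the receiving side (sock_fd is hypothetical):
 *
 *	int on = 1;
 *
 *	setsockopt(sock_fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *
 * A later recvmsg() then carries a cmsg with cmsg_level SOL_SOCKET,
 * cmsg_type SCM_CREDENTIALS and a struct ucred payload holding the
 * pid/uid/gid filled in above.
 */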
1843
1844 static bool unix_skb_scm_eq(struct sk_buff *skb,
1845 struct scm_cookie *scm)
1846 {
1847 return UNIXCB(skb).pid == scm->pid &&
1848 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1849 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1850 unix_secdata_eq(scm, skb);
1851 }
1852
1853 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1854 {
1855 struct scm_fp_list *fp = UNIXCB(skb).fp;
1856 struct unix_sock *u = unix_sk(sk);
1857
1858 if (unlikely(fp && fp->count))
1859 atomic_add(fp->count, &u->scm_stat.nr_fds);
1860 }
1861
1862 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1863 {
1864 struct scm_fp_list *fp = UNIXCB(skb).fp;
1865 struct unix_sock *u = unix_sk(sk);
1866
1867 if (unlikely(fp && fp->count))
1868 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1869 }
1870
1871 /*
1872 * Send AF_UNIX data.
1873 */
1874
1875 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1876 size_t len)
1877 {
1878 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1879 struct sock *sk = sock->sk, *other = NULL;
1880 struct unix_sock *u = unix_sk(sk);
1881 struct scm_cookie scm;
1882 struct sk_buff *skb;
1883 int data_len = 0;
1884 int sk_locked;
1885 long timeo;
1886 int err;
1887
1888 wait_for_unix_gc();
1889 err = scm_send(sock, msg, &scm, false);
1890 if (err < 0)
1891 return err;
1892
1893 err = -EOPNOTSUPP;
1894 if (msg->msg_flags&MSG_OOB)
1895 goto out;
1896
1897 if (msg->msg_namelen) {
1898 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1899 if (err)
1900 goto out;
1901 } else {
1902 sunaddr = NULL;
1903 err = -ENOTCONN;
1904 other = unix_peer_get(sk);
1905 if (!other)
1906 goto out;
1907 }
1908
1909 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1910 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1911 !READ_ONCE(u->addr)) {
1912 err = unix_autobind(sk);
1913 if (err)
1914 goto out;
1915 }
1916
1917 err = -EMSGSIZE;
1918 if (len > READ_ONCE(sk->sk_sndbuf) - 32)
1919 goto out;
1920
1921 if (len > SKB_MAX_ALLOC) {
1922 data_len = min_t(size_t,
1923 len - SKB_MAX_ALLOC,
1924 MAX_SKB_FRAGS * PAGE_SIZE);
1925 data_len = PAGE_ALIGN(data_len);
1926
1927 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1928 }
1929
1930 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1931 msg->msg_flags & MSG_DONTWAIT, &err,
1932 PAGE_ALLOC_COSTLY_ORDER);
1933 if (skb == NULL)
1934 goto out;
1935
1936 err = unix_scm_to_skb(&scm, skb, true);
1937 if (err < 0)
1938 goto out_free;
1939
1940 skb_put(skb, len - data_len);
1941 skb->data_len = data_len;
1942 skb->len = len;
1943 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1944 if (err)
1945 goto out_free;
1946
1947 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1948
1949 restart:
1950 if (!other) {
1951 err = -ECONNRESET;
1952 if (sunaddr == NULL)
1953 goto out_free;
1954
1955 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1956 sk->sk_type);
1957 if (IS_ERR(other)) {
1958 err = PTR_ERR(other);
1959 other = NULL;
1960 goto out_free;
1961 }
1962 }
1963
1964 if (sk_filter(other, skb) < 0) {
1965 /* Toss the packet but do not return any error to the sender */
1966 err = len;
1967 goto out_free;
1968 }
1969
1970 sk_locked = 0;
1971 unix_state_lock(other);
1972 restart_locked:
1973 err = -EPERM;
1974 if (!unix_may_send(sk, other))
1975 goto out_unlock;
1976
1977 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1978 /*
1979 * Check with 1003.1g - what should
1980 * a datagram error return here?
1981 */
1982 unix_state_unlock(other);
1983 sock_put(other);
1984
1985 if (!sk_locked)
1986 unix_state_lock(sk);
1987
1988 err = 0;
1989 if (sk->sk_type == SOCK_SEQPACKET) {
1990 /* We are here only when racing with unix_release_sock(),
1991  * which is clearing @other. Never change the state to
1992  * TCP_CLOSE, unlike the SOCK_DGRAM path below.
1993 */
1994 unix_state_unlock(sk);
1995 err = -EPIPE;
1996 } else if (unix_peer(sk) == other) {
1997 unix_peer(sk) = NULL;
1998 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1999
2000 WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2001 unix_state_unlock(sk);
2002
2003 unix_dgram_disconnected(sk, other);
2004 sock_put(other);
2005 err = -ECONNREFUSED;
2006 } else {
2007 unix_state_unlock(sk);
2008 }
2009
2010 other = NULL;
2011 if (err)
2012 goto out_free;
2013 goto restart;
2014 }
2015
2016 err = -EPIPE;
2017 if (other->sk_shutdown & RCV_SHUTDOWN)
2018 goto out_unlock;
2019
2020 if (sk->sk_type != SOCK_SEQPACKET) {
2021 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2022 if (err)
2023 goto out_unlock;
2024 }
2025
2026 /* other == sk && unix_peer(other) != sk if
2027 * - unix_peer(sk) == NULL, destination address bound to sk
2028 * - unix_peer(sk) == sk by time of get but disconnected before lock
2029 */
2030 if (other != sk &&
2031 unlikely(unix_peer(other) != sk &&
2032 unix_recvq_full_lockless(other))) {
2033 if (timeo) {
2034 timeo = unix_wait_for_peer(other, timeo);
2035
2036 err = sock_intr_errno(timeo);
2037 if (signal_pending(current))
2038 goto out_free;
2039
2040 goto restart;
2041 }
2042
2043 if (!sk_locked) {
2044 unix_state_unlock(other);
2045 unix_state_double_lock(sk, other);
2046 }
2047
2048 if (unix_peer(sk) != other ||
2049 unix_dgram_peer_wake_me(sk, other)) {
2050 err = -EAGAIN;
2051 sk_locked = 1;
2052 goto out_unlock;
2053 }
2054
2055 if (!sk_locked) {
2056 sk_locked = 1;
2057 goto restart_locked;
2058 }
2059 }
2060
2061 if (unlikely(sk_locked))
2062 unix_state_unlock(sk);
2063
2064 if (sock_flag(other, SOCK_RCVTSTAMP))
2065 __net_timestamp(skb);
2066 maybe_add_creds(skb, sock, other);
2067 scm_stat_add(other, skb);
2068 skb_queue_tail(&other->sk_receive_queue, skb);
2069 unix_state_unlock(other);
2070 other->sk_data_ready(other);
2071 sock_put(other);
2072 scm_destroy(&scm);
2073 return len;
2074
2075 out_unlock:
2076 if (sk_locked)
2077 unix_state_unlock(sk);
2078 unix_state_unlock(other);
2079 out_free:
2080 kfree_skb(skb);
2081 out:
2082 if (other)
2083 sock_put(other);
2084 scm_destroy(&scm);
2085 return err;
2086 }
2087
2088 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2089 * bytes, with a minimum of a full page.
2090 */
2091 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
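
/* Worked example, assuming 4 KiB pages: get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768 bytes (eight pages). With
 * 64 KiB pages get_order(32768) == 0 and the macro degenerates to a
 * single page, the "minimum of a full page" mentioned above.
 */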
2092
2093 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2094 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2095 struct scm_cookie *scm, bool fds_sent)
2096 {
2097 struct unix_sock *ousk = unix_sk(other);
2098 struct sk_buff *skb;
2099 int err = 0;
2100
2101 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2102
2103 if (!skb)
2104 return err;
2105
2106 err = unix_scm_to_skb(scm, skb, !fds_sent);
2107 if (err < 0) {
2108 kfree_skb(skb);
2109 return err;
2110 }
2111 skb_put(skb, 1);
2112 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2113
2114 if (err) {
2115 kfree_skb(skb);
2116 return err;
2117 }
2118
2119 unix_state_lock(other);
2120
2121 if (sock_flag(other, SOCK_DEAD) ||
2122 (other->sk_shutdown & RCV_SHUTDOWN)) {
2123 unix_state_unlock(other);
2124 kfree_skb(skb);
2125 return -EPIPE;
2126 }
2127
2128 maybe_add_creds(skb, sock, other);
2129 skb_get(skb);
2130
2131 scm_stat_add(other, skb);
2132
2133 spin_lock(&other->sk_receive_queue.lock);
2134 if (ousk->oob_skb)
2135 consume_skb(ousk->oob_skb);
2136 WRITE_ONCE(ousk->oob_skb, skb);
2137 __skb_queue_tail(&other->sk_receive_queue, skb);
2138 spin_unlock(&other->sk_receive_queue.lock);
2139
2140 sk_send_sigurg(other);
2141 unix_state_unlock(other);
2142 other->sk_data_ready(other);
2143
2144 return err;
2145 }
2146 #endif
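
/*
 * A minimal userspace sketch of the OOB path above (illustrative;
 * assumes CONFIG_AF_UNIX_OOB and a connected SOCK_STREAM pair
 * fd_a/fd_b, error handling omitted):
 *
 *	char c;
 *
 *	send(fd_a, "ab", 2, MSG_OOB);	(only the last byte is OOB)
 *	recv(fd_b, &c, 1, 0);		(reads 'a', stops at the mark)
 *	recv(fd_b, &c, 1, MSG_OOB);	(reads the OOB byte 'b')
 */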
2147
2148 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2149 size_t len)
2150 {
2151 struct sock *sk = sock->sk;
2152 struct sock *other = NULL;
2153 int err, size;
2154 struct sk_buff *skb;
2155 int sent = 0;
2156 struct scm_cookie scm;
2157 bool fds_sent = false;
2158 int data_len;
2159
2160 wait_for_unix_gc();
2161 err = scm_send(sock, msg, &scm, false);
2162 if (err < 0)
2163 return err;
2164
2165 err = -EOPNOTSUPP;
2166 if (msg->msg_flags & MSG_OOB) {
2167 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2168 if (len)
2169 len--;
2170 else
2171 #endif
2172 goto out_err;
2173 }
2174
2175 if (msg->msg_namelen) {
2176 err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2177 goto out_err;
2178 } else {
2179 err = -ENOTCONN;
2180 other = unix_peer(sk);
2181 if (!other)
2182 goto out_err;
2183 }
2184
2185 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2186 goto pipe_err;
2187
2188 while (sent < len) {
2189 size = len - sent;
2190
2191 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2192 skb = sock_alloc_send_pskb(sk, 0, 0,
2193 msg->msg_flags & MSG_DONTWAIT,
2194 &err, 0);
2195 } else {
2196 /* Keep two messages in the pipe so it schedules better */
2197 size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2198
2199 /* allow fallback to order-0 allocations */
2200 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2201
2202 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2203
2204 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2205
2206 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2207 msg->msg_flags & MSG_DONTWAIT, &err,
2208 get_order(UNIX_SKB_FRAGS_SZ));
2209 }
2210 if (!skb)
2211 goto out_err;
2212
2213 /* Only send the fds in the first buffer */
2214 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2215 if (err < 0) {
2216 kfree_skb(skb);
2217 goto out_err;
2218 }
2219 fds_sent = true;
2220
2221 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2222 err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2223 sk->sk_allocation);
2224 if (err < 0) {
2225 kfree_skb(skb);
2226 goto out_err;
2227 }
2228 size = err;
2229 refcount_add(size, &sk->sk_wmem_alloc);
2230 } else {
2231 skb_put(skb, size - data_len);
2232 skb->data_len = data_len;
2233 skb->len = size;
2234 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2235 if (err) {
2236 kfree_skb(skb);
2237 goto out_err;
2238 }
2239 }
2240
2241 unix_state_lock(other);
2242
2243 if (sock_flag(other, SOCK_DEAD) ||
2244 (other->sk_shutdown & RCV_SHUTDOWN))
2245 goto pipe_err_free;
2246
2247 maybe_add_creds(skb, sock, other);
2248 scm_stat_add(other, skb);
2249 skb_queue_tail(&other->sk_receive_queue, skb);
2250 unix_state_unlock(other);
2251 other->sk_data_ready(other);
2252 sent += size;
2253 }
2254
2255 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2256 if (msg->msg_flags & MSG_OOB) {
2257 err = queue_oob(sock, msg, other, &scm, fds_sent);
2258 if (err)
2259 goto out_err;
2260 sent++;
2261 }
2262 #endif
2263
2264 scm_destroy(&scm);
2265
2266 return sent;
2267
2268 pipe_err_free:
2269 unix_state_unlock(other);
2270 kfree_skb(skb);
2271 pipe_err:
2272 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2273 send_sig(SIGPIPE, current, 0);
2274 err = -EPIPE;
2275 out_err:
2276 scm_destroy(&scm);
2277 return sent ? : err;
2278 }
2279
2280 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2281 size_t len)
2282 {
2283 int err;
2284 struct sock *sk = sock->sk;
2285
2286 err = sock_error(sk);
2287 if (err)
2288 return err;
2289
2290 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2291 return -ENOTCONN;
2292
2293 if (msg->msg_namelen)
2294 msg->msg_namelen = 0;
2295
2296 return unix_dgram_sendmsg(sock, msg, len);
2297 }
2298
2299 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2300 size_t size, int flags)
2301 {
2302 struct sock *sk = sock->sk;
2303
2304 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2305 return -ENOTCONN;
2306
2307 return unix_dgram_recvmsg(sock, msg, size, flags);
2308 }
2309
2310 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2311 {
2312 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2313
2314 if (addr) {
2315 msg->msg_namelen = addr->len;
2316 memcpy(msg->msg_name, addr->name, addr->len);
2317 }
2318 }
2319
2320 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2321 int flags)
2322 {
2323 struct scm_cookie scm;
2324 struct socket *sock = sk->sk_socket;
2325 struct unix_sock *u = unix_sk(sk);
2326 struct sk_buff *skb, *last;
2327 long timeo;
2328 int skip;
2329 int err;
2330
2331 err = -EOPNOTSUPP;
2332 if (flags&MSG_OOB)
2333 goto out;
2334
2335 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2336
2337 do {
2338 mutex_lock(&u->iolock);
2339
2340 skip = sk_peek_offset(sk, flags);
2341 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2342 &skip, &err, &last);
2343 if (skb) {
2344 if (!(flags & MSG_PEEK))
2345 scm_stat_del(sk, skb);
2346 break;
2347 }
2348
2349 mutex_unlock(&u->iolock);
2350
2351 if (err != -EAGAIN)
2352 break;
2353 } while (timeo &&
2354 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2355 &err, &timeo, last));
2356
2357 if (!skb) { /* implies iolock unlocked */
2358 unix_state_lock(sk);
2359 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2360 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2361 (sk->sk_shutdown & RCV_SHUTDOWN))
2362 err = 0;
2363 unix_state_unlock(sk);
2364 goto out;
2365 }
2366
2367 if (wq_has_sleeper(&u->peer_wait))
2368 wake_up_interruptible_sync_poll(&u->peer_wait,
2369 EPOLLOUT | EPOLLWRNORM |
2370 EPOLLWRBAND);
2371
2372 if (msg->msg_name)
2373 unix_copy_addr(msg, skb->sk);
2374
2375 if (size > skb->len - skip)
2376 size = skb->len - skip;
2377 else if (size < skb->len - skip)
2378 msg->msg_flags |= MSG_TRUNC;
2379
2380 err = skb_copy_datagram_msg(skb, skip, msg, size);
2381 if (err)
2382 goto out_free;
2383
2384 if (sock_flag(sk, SOCK_RCVTSTAMP))
2385 __sock_recv_timestamp(msg, sk, skb);
2386
2387 memset(&scm, 0, sizeof(scm));
2388
2389 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2390 unix_set_secdata(&scm, skb);
2391
2392 if (!(flags & MSG_PEEK)) {
2393 if (UNIXCB(skb).fp)
2394 unix_detach_fds(&scm, skb);
2395
2396 sk_peek_offset_bwd(sk, skb->len);
2397 } else {
2398 /* It is questionable what to do on PEEK; we could:
2399  * - not return fds - good, but too simple 8)
2400  * - return fds, and not return them on read (old strategy,
2401  *   apparently wrong)
2402  * - clone fds (chosen here, as it is the most universal
2403  *   solution)
2404  *
2405  * POSIX 1003.1g does not actually define this clearly
2406  * at all. POSIX 1003.1g doesn't define a lot of things
2407  * clearly, however!
2408  *
2409  */
2410
2411 sk_peek_offset_fwd(sk, size);
2412
2413 if (UNIXCB(skb).fp)
2414 unix_peek_fds(&scm, skb);
2415 }
2416 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2417
2418 scm_recv_unix(sock, msg, &scm, flags);
2419
2420 out_free:
2421 skb_free_datagram(sk, skb);
2422 mutex_unlock(&u->iolock);
2423 out:
2424 return err;
2425 }
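
/*
 * Observable effect of the "clone fds" choice above, as a hedged
 * sketch (descriptor numbers are hypothetical): for a queued message
 * carrying one SCM_RIGHTS fd,
 *
 *	recvmsg(fd, &mh, MSG_PEEK);	(installs, say, fd 7)
 *	recvmsg(fd, &mh, MSG_PEEK);	(installs another dup, say fd 8)
 *	recvmsg(fd, &mh, 0);		(installs a third; message dequeued)
 *
 * all three descriptors refer to the same open file description.
 */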
2426
2427 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2428 int flags)
2429 {
2430 struct sock *sk = sock->sk;
2431
2432 #ifdef CONFIG_BPF_SYSCALL
2433 const struct proto *prot = READ_ONCE(sk->sk_prot);
2434
2435 if (prot != &unix_dgram_proto)
2436 return prot->recvmsg(sk, msg, size, flags, NULL);
2437 #endif
2438 return __unix_dgram_recvmsg(sk, msg, size, flags);
2439 }
2440
2441 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2442 {
2443 struct unix_sock *u = unix_sk(sk);
2444 struct sk_buff *skb;
2445 int err;
2446
2447 mutex_lock(&u->iolock);
2448 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2449 mutex_unlock(&u->iolock);
2450 if (!skb)
2451 return err;
2452
2453 return recv_actor(sk, skb);
2454 }
2455
2456 /*
2457 * Sleep until more data has arrived. But check for races.
2458 */
2459 static long unix_stream_data_wait(struct sock *sk, long timeo,
2460 struct sk_buff *last, unsigned int last_len,
2461 bool freezable)
2462 {
2463 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2464 struct sk_buff *tail;
2465 DEFINE_WAIT(wait);
2466
2467 unix_state_lock(sk);
2468
2469 for (;;) {
2470 prepare_to_wait(sk_sleep(sk), &wait, state);
2471
2472 tail = skb_peek_tail(&sk->sk_receive_queue);
2473 if (tail != last ||
2474 (tail && tail->len != last_len) ||
2475 sk->sk_err ||
2476 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2477 signal_pending(current) ||
2478 !timeo)
2479 break;
2480
2481 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2482 unix_state_unlock(sk);
2483 timeo = schedule_timeout(timeo);
2484 unix_state_lock(sk);
2485
2486 if (sock_flag(sk, SOCK_DEAD))
2487 break;
2488
2489 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2490 }
2491
2492 finish_wait(sk_sleep(sk), &wait);
2493 unix_state_unlock(sk);
2494 return timeo;
2495 }
2496
2497 static unsigned int unix_skb_len(const struct sk_buff *skb)
2498 {
2499 return skb->len - UNIXCB(skb).consumed;
2500 }
2501
2502 struct unix_stream_read_state {
2503 int (*recv_actor)(struct sk_buff *, int, int,
2504 struct unix_stream_read_state *);
2505 struct socket *socket;
2506 struct msghdr *msg;
2507 struct pipe_inode_info *pipe;
2508 size_t size;
2509 int flags;
2510 unsigned int splice_flags;
2511 };
2512
2513 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2514 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2515 {
2516 struct socket *sock = state->socket;
2517 struct sock *sk = sock->sk;
2518 struct unix_sock *u = unix_sk(sk);
2519 int chunk = 1;
2520 struct sk_buff *oob_skb;
2521
2522 mutex_lock(&u->iolock);
2523 unix_state_lock(sk);
2524 spin_lock(&sk->sk_receive_queue.lock);
2525
2526 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2527 spin_unlock(&sk->sk_receive_queue.lock);
2528 unix_state_unlock(sk);
2529 mutex_unlock(&u->iolock);
2530 return -EINVAL;
2531 }
2532
2533 oob_skb = u->oob_skb;
2534
2535 if (!(state->flags & MSG_PEEK))
2536 WRITE_ONCE(u->oob_skb, NULL);
2537 else
2538 skb_get(oob_skb);
2539
2540 spin_unlock(&sk->sk_receive_queue.lock);
2541 unix_state_unlock(sk);
2542
2543 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2544
2545 if (!(state->flags & MSG_PEEK))
2546 UNIXCB(oob_skb).consumed += 1;
2547
2548 consume_skb(oob_skb);
2549
2550 mutex_unlock(&u->iolock);
2551
2552 if (chunk < 0)
2553 return -EFAULT;
2554
2555 state->msg->msg_flags |= MSG_OOB;
2556 return 1;
2557 }
2558
2559 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2560 int flags, int copied)
2561 {
2562 struct unix_sock *u = unix_sk(sk);
2563
2564 if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2565 skb_unlink(skb, &sk->sk_receive_queue);
2566 consume_skb(skb);
2567 skb = NULL;
2568 } else {
2569 struct sk_buff *unlinked_skb = NULL;
2570
2571 spin_lock(&sk->sk_receive_queue.lock);
2572
2573 if (skb == u->oob_skb) {
2574 if (copied) {
2575 skb = NULL;
2576 } else if (!(flags & MSG_PEEK)) {
2577 if (sock_flag(sk, SOCK_URGINLINE)) {
2578 WRITE_ONCE(u->oob_skb, NULL);
2579 consume_skb(skb);
2580 } else {
2581 __skb_unlink(skb, &sk->sk_receive_queue);
2582 WRITE_ONCE(u->oob_skb, NULL);
2583 unlinked_skb = skb;
2584 skb = skb_peek(&sk->sk_receive_queue);
2585 }
2586 } else if (!sock_flag(sk, SOCK_URGINLINE)) {
2587 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2588 }
2589 }
2590
2591 spin_unlock(&sk->sk_receive_queue.lock);
2592
2593 if (unlinked_skb) {
2594 WARN_ON_ONCE(skb_unref(unlinked_skb));
2595 kfree_skb(unlinked_skb);
2596 }
2597 }
2598 return skb;
2599 }
2600 #endif
2601
2602 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2603 {
2604 struct unix_sock *u = unix_sk(sk);
2605 struct sk_buff *skb;
2606 int err;
2607
2608 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2609 return -ENOTCONN;
2610
2611 mutex_lock(&u->iolock);
2612 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2613 mutex_unlock(&u->iolock);
2614 if (!skb)
2615 return err;
2616
2617 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2618 if (unlikely(skb == READ_ONCE(u->oob_skb))) {
2619 bool drop = false;
2620
2621 unix_state_lock(sk);
2622
2623 if (sock_flag(sk, SOCK_DEAD)) {
2624 unix_state_unlock(sk);
2625 kfree_skb(skb);
2626 return -ECONNRESET;
2627 }
2628
2629 spin_lock(&sk->sk_receive_queue.lock);
2630 if (likely(skb == u->oob_skb)) {
2631 WRITE_ONCE(u->oob_skb, NULL);
2632 drop = true;
2633 }
2634 spin_unlock(&sk->sk_receive_queue.lock);
2635
2636 unix_state_unlock(sk);
2637
2638 if (drop) {
2639 WARN_ON_ONCE(skb_unref(skb));
2640 kfree_skb(skb);
2641 return -EAGAIN;
2642 }
2643 }
2644 #endif
2645
2646 return recv_actor(sk, skb);
2647 }
2648
2649 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2650 bool freezable)
2651 {
2652 struct scm_cookie scm;
2653 struct socket *sock = state->socket;
2654 struct sock *sk = sock->sk;
2655 struct unix_sock *u = unix_sk(sk);
2656 int copied = 0;
2657 int flags = state->flags;
2658 int noblock = flags & MSG_DONTWAIT;
2659 bool check_creds = false;
2660 int target;
2661 int err = 0;
2662 long timeo;
2663 int skip;
2664 size_t size = state->size;
2665 unsigned int last_len;
2666
2667 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2668 err = -EINVAL;
2669 goto out;
2670 }
2671
2672 if (unlikely(flags & MSG_OOB)) {
2673 err = -EOPNOTSUPP;
2674 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2675 err = unix_stream_recv_urg(state);
2676 #endif
2677 goto out;
2678 }
2679
2680 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2681 timeo = sock_rcvtimeo(sk, noblock);
2682
2683 memset(&scm, 0, sizeof(scm));
2684
2685 /* Lock the socket to prevent queue disordering
2686 * while we sleep in memcpy_to_msg().
2687 */
2688 mutex_lock(&u->iolock);
2689
2690 skip = max(sk_peek_offset(sk, flags), 0);
2691
2692 do {
2693 int chunk;
2694 bool drop_skb;
2695 struct sk_buff *skb, *last;
2696
2697 redo:
2698 unix_state_lock(sk);
2699 if (sock_flag(sk, SOCK_DEAD)) {
2700 err = -ECONNRESET;
2701 goto unlock;
2702 }
2703 last = skb = skb_peek(&sk->sk_receive_queue);
2704 last_len = last ? last->len : 0;
2705
2706 again:
2707 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2708 if (skb) {
2709 skb = manage_oob(skb, sk, flags, copied);
2710 if (!skb && copied) {
2711 unix_state_unlock(sk);
2712 break;
2713 }
2714 }
2715 #endif
2716 if (skb == NULL) {
2717 if (copied >= target)
2718 goto unlock;
2719
2720 /*
2721 * POSIX 1003.1g mandates this order.
2722 */
2723
2724 err = sock_error(sk);
2725 if (err)
2726 goto unlock;
2727 if (sk->sk_shutdown & RCV_SHUTDOWN)
2728 goto unlock;
2729
2730 unix_state_unlock(sk);
2731 if (!timeo) {
2732 err = -EAGAIN;
2733 break;
2734 }
2735
2736 mutex_unlock(&u->iolock);
2737
2738 timeo = unix_stream_data_wait(sk, timeo, last,
2739 last_len, freezable);
2740
2741 if (signal_pending(current)) {
2742 err = sock_intr_errno(timeo);
2743 scm_destroy(&scm);
2744 goto out;
2745 }
2746
2747 mutex_lock(&u->iolock);
2748 goto redo;
2749 unlock:
2750 unix_state_unlock(sk);
2751 break;
2752 }
2753
2754 while (skip >= unix_skb_len(skb)) {
2755 skip -= unix_skb_len(skb);
2756 last = skb;
2757 last_len = skb->len;
2758 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2759 if (!skb)
2760 goto again;
2761 }
2762
2763 unix_state_unlock(sk);
2764
2765 if (check_creds) {
2766 /* Never glue messages from different writers */
2767 if (!unix_skb_scm_eq(skb, &scm))
2768 break;
2769 } else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2770 test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2771 /* Copy credentials */
2772 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2773 unix_set_secdata(&scm, skb);
2774 check_creds = true;
2775 }
2776
2777 /* Copy address just once */
2778 if (state->msg && state->msg->msg_name) {
2779 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2780 state->msg->msg_name);
2781 unix_copy_addr(state->msg, skb->sk);
2782 sunaddr = NULL;
2783 }
2784
2785 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2786 skb_get(skb);
2787 chunk = state->recv_actor(skb, skip, chunk, state);
2788 drop_skb = !unix_skb_len(skb);
2789 /* skb is only safe to use if !drop_skb */
2790 consume_skb(skb);
2791 if (chunk < 0) {
2792 if (copied == 0)
2793 copied = -EFAULT;
2794 break;
2795 }
2796 copied += chunk;
2797 size -= chunk;
2798
2799 if (drop_skb) {
2800 /* the skb was touched by a concurrent reader;
2801 * we should not expect anything from this skb
2802 * anymore and assume it is invalid - we can be
2803 * sure it was dropped from the socket queue
2804 *
2805 * let's report a short read
2806 */
2807 err = 0;
2808 break;
2809 }
2810
2811 /* Mark read part of skb as used */
2812 if (!(flags & MSG_PEEK)) {
2813 UNIXCB(skb).consumed += chunk;
2814
2815 sk_peek_offset_bwd(sk, chunk);
2816
2817 if (UNIXCB(skb).fp) {
2818 scm_stat_del(sk, skb);
2819 unix_detach_fds(&scm, skb);
2820 }
2821
2822 if (unix_skb_len(skb))
2823 break;
2824
2825 skb_unlink(skb, &sk->sk_receive_queue);
2826 consume_skb(skb);
2827
2828 if (scm.fp)
2829 break;
2830 } else {
2831 /* It is questionable, see note in unix_dgram_recvmsg.
2832 */
2833 if (UNIXCB(skb).fp)
2834 unix_peek_fds(&scm, skb);
2835
2836 sk_peek_offset_fwd(sk, chunk);
2837
2838 if (UNIXCB(skb).fp)
2839 break;
2840
2841 skip = 0;
2842 last = skb;
2843 last_len = skb->len;
2844 unix_state_lock(sk);
2845 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2846 if (skb)
2847 goto again;
2848 unix_state_unlock(sk);
2849 break;
2850 }
2851 } while (size);
2852
2853 mutex_unlock(&u->iolock);
2854 if (state->msg)
2855 scm_recv_unix(sock, state->msg, &scm, flags);
2856 else
2857 scm_destroy(&scm);
2858 out:
2859 return copied ? : err;
2860 }
2861
2862 static int unix_stream_read_actor(struct sk_buff *skb,
2863 int skip, int chunk,
2864 struct unix_stream_read_state *state)
2865 {
2866 int ret;
2867
2868 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2869 state->msg, chunk);
2870 return ret ?: chunk;
2871 }
2872
2873 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2874 size_t size, int flags)
2875 {
2876 struct unix_stream_read_state state = {
2877 .recv_actor = unix_stream_read_actor,
2878 .socket = sk->sk_socket,
2879 .msg = msg,
2880 .size = size,
2881 .flags = flags
2882 };
2883
2884 return unix_stream_read_generic(&state, true);
2885 }
2886
2887 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2888 size_t size, int flags)
2889 {
2890 struct unix_stream_read_state state = {
2891 .recv_actor = unix_stream_read_actor,
2892 .socket = sock,
2893 .msg = msg,
2894 .size = size,
2895 .flags = flags
2896 };
2897
2898 #ifdef CONFIG_BPF_SYSCALL
2899 struct sock *sk = sock->sk;
2900 const struct proto *prot = READ_ONCE(sk->sk_prot);
2901
2902 if (prot != &unix_stream_proto)
2903 return prot->recvmsg(sk, msg, size, flags, NULL);
2904 #endif
2905 return unix_stream_read_generic(&state, true);
2906 }
2907
2908 static int unix_stream_splice_actor(struct sk_buff *skb,
2909 int skip, int chunk,
2910 struct unix_stream_read_state *state)
2911 {
2912 return skb_splice_bits(skb, state->socket->sk,
2913 UNIXCB(skb).consumed + skip,
2914 state->pipe, chunk, state->splice_flags);
2915 }
2916
2917 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2918 struct pipe_inode_info *pipe,
2919 size_t size, unsigned int flags)
2920 {
2921 struct unix_stream_read_state state = {
2922 .recv_actor = unix_stream_splice_actor,
2923 .socket = sock,
2924 .pipe = pipe,
2925 .size = size,
2926 .splice_flags = flags,
2927 };
2928
2929 if (unlikely(*ppos))
2930 return -ESPIPE;
2931
2932 if (sock->file->f_flags & O_NONBLOCK ||
2933 flags & SPLICE_F_NONBLOCK)
2934 state.flags = MSG_DONTWAIT;
2935
2936 return unix_stream_read_generic(&state, false);
2937 }
2938
2939 static int unix_shutdown(struct socket *sock, int mode)
2940 {
2941 struct sock *sk = sock->sk;
2942 struct sock *other;
2943
2944 if (mode < SHUT_RD || mode > SHUT_RDWR)
2945 return -EINVAL;
2946 /* This maps:
2947 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2948 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2949 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2950 */
2951 ++mode;
2952
2953 unix_state_lock(sk);
2954 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2955 other = unix_peer(sk);
2956 if (other)
2957 sock_hold(other);
2958 unix_state_unlock(sk);
2959 sk->sk_state_change(sk);
2960
2961 if (other &&
2962 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2963
2964 int peer_mode = 0;
2965 const struct proto *prot = READ_ONCE(other->sk_prot);
2966
2967 if (prot->unhash)
2968 prot->unhash(other);
2969 if (mode&RCV_SHUTDOWN)
2970 peer_mode |= SEND_SHUTDOWN;
2971 if (mode&SEND_SHUTDOWN)
2972 peer_mode |= RCV_SHUTDOWN;
2973 unix_state_lock(other);
2974 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2975 unix_state_unlock(other);
2976 other->sk_state_change(other);
2977 if (peer_mode == SHUTDOWN_MASK)
2978 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2979 else if (peer_mode & RCV_SHUTDOWN)
2980 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2981 }
2982 if (other)
2983 sock_put(other);
2984
2985 return 0;
2986 }
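
/*
 * Hedged userspace sketch of the propagation above for a connected
 * SOCK_STREAM pair fd_a/fd_b (hypothetical names):
 *
 *	shutdown(fd_a, SHUT_WR);
 *
 * sets SEND_SHUTDOWN on fd_a and RCV_SHUTDOWN on the peer, so a
 * blocked read() on fd_b returns 0 (EOF) and poll() on fd_b reports
 * EPOLLRDHUP | EPOLLIN.
 */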
2987
2988 long unix_inq_len(struct sock *sk)
2989 {
2990 struct sk_buff *skb;
2991 long amount = 0;
2992
2993 if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
2994 return -EINVAL;
2995
2996 spin_lock(&sk->sk_receive_queue.lock);
2997 if (sk->sk_type == SOCK_STREAM ||
2998 sk->sk_type == SOCK_SEQPACKET) {
2999 skb_queue_walk(&sk->sk_receive_queue, skb)
3000 amount += unix_skb_len(skb);
3001 } else {
3002 skb = skb_peek(&sk->sk_receive_queue);
3003 if (skb)
3004 amount = skb->len;
3005 }
3006 spin_unlock(&sk->sk_receive_queue.lock);
3007
3008 return amount;
3009 }
3010 EXPORT_SYMBOL_GPL(unix_inq_len);
3011
3012 long unix_outq_len(struct sock *sk)
3013 {
3014 return sk_wmem_alloc_get(sk);
3015 }
3016 EXPORT_SYMBOL_GPL(unix_outq_len);
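
/*
 * These two helpers back the SIOCINQ/SIOCOUTQ ioctls handled below;
 * hedged userspace sketch (sock_fd is hypothetical):
 *
 *	int pending, unsent;
 *
 *	ioctl(sock_fd, SIOCINQ, &pending);	(unread bytes; for
 *						 SOCK_DGRAM, the size of
 *						 the first datagram)
 *	ioctl(sock_fd, SIOCOUTQ, &unsent);	(sent bytes not yet
 *						 consumed by the peer)
 */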
3017
3018 static int unix_open_file(struct sock *sk)
3019 {
3020 struct path path;
3021 struct file *f;
3022 int fd;
3023
3024 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3025 return -EPERM;
3026
3027 if (!smp_load_acquire(&unix_sk(sk)->addr))
3028 return -ENOENT;
3029
3030 path = unix_sk(sk)->path;
3031 if (!path.dentry)
3032 return -ENOENT;
3033
3034 path_get(&path);
3035
3036 fd = get_unused_fd_flags(O_CLOEXEC);
3037 if (fd < 0)
3038 goto out;
3039
3040 f = dentry_open(&path, O_PATH, current_cred());
3041 if (IS_ERR(f)) {
3042 put_unused_fd(fd);
3043 fd = PTR_ERR(f);
3044 goto out;
3045 }
3046
3047 fd_install(fd, f);
3048 out:
3049 path_put(&path);
3050
3051 return fd;
3052 }
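
/*
 * Hedged sketch of SIOCUNIXFILE usage (requires CAP_NET_ADMIN and a
 * filesystem-bound socket; sock_fd is hypothetical):
 *
 *	int pfd = ioctl(sock_fd, SIOCUNIXFILE, 0);
 *
 * On success pfd is an O_PATH descriptor for the socket inode, usable
 * with *at() calls such as fstatat(), but not for I/O.
 */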
3053
3054 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3055 {
3056 struct sock *sk = sock->sk;
3057 long amount = 0;
3058 int err;
3059
3060 switch (cmd) {
3061 case SIOCOUTQ:
3062 amount = unix_outq_len(sk);
3063 err = put_user(amount, (int __user *)arg);
3064 break;
3065 case SIOCINQ:
3066 amount = unix_inq_len(sk);
3067 if (amount < 0)
3068 err = amount;
3069 else
3070 err = put_user(amount, (int __user *)arg);
3071 break;
3072 case SIOCUNIXFILE:
3073 err = unix_open_file(sk);
3074 break;
3075 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3076 case SIOCATMARK:
3077 {
3078 struct sk_buff *skb;
3079 int answ = 0;
3080
3081 skb = skb_peek(&sk->sk_receive_queue);
3082 if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3083 answ = 1;
3084 err = put_user(answ, (int __user *)arg);
3085 }
3086 break;
3087 #endif
3088 default:
3089 err = -ENOIOCTLCMD;
3090 break;
3091 }
3092 return err;
3093 }
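
/*
 * Hedged sketch of the SIOCATMARK case above, mirroring TCP urgent
 * data handling (sock_fd is hypothetical):
 *
 *	char c;
 *	int at_mark;
 *
 *	ioctl(sock_fd, SIOCATMARK, &at_mark);
 *	if (at_mark)
 *		recv(sock_fd, &c, 1, MSG_OOB);	(next byte is the OOB byte)
 *	else
 *		recv(sock_fd, &c, 1, 0);	(in-band data comes first)
 */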
3094
3095 #ifdef CONFIG_COMPAT
3096 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3097 {
3098 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3099 }
3100 #endif
3101
3102 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3103 {
3104 struct sock *sk = sock->sk;
3105 unsigned char state;
3106 __poll_t mask;
3107 u8 shutdown;
3108
3109 sock_poll_wait(file, sock, wait);
3110 mask = 0;
3111 shutdown = READ_ONCE(sk->sk_shutdown);
3112 state = READ_ONCE(sk->sk_state);
3113
3114 /* exceptional events? */
3115 if (READ_ONCE(sk->sk_err))
3116 mask |= EPOLLERR;
3117 if (shutdown == SHUTDOWN_MASK)
3118 mask |= EPOLLHUP;
3119 if (shutdown & RCV_SHUTDOWN)
3120 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3121
3122 /* readable? */
3123 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3124 mask |= EPOLLIN | EPOLLRDNORM;
3125 if (sk_is_readable(sk))
3126 mask |= EPOLLIN | EPOLLRDNORM;
3127 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3128 if (READ_ONCE(unix_sk(sk)->oob_skb))
3129 mask |= EPOLLPRI;
3130 #endif
3131
3132 /* Connection-based sockets need to check for termination and startup */
3133 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3134 state == TCP_CLOSE)
3135 mask |= EPOLLHUP;
3136
3137 /*
3138 * we set writable also when the other side has shut down the
3139 * connection. This prevents stuck sockets.
3140 */
3141 if (unix_writable(sk, state))
3142 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3143
3144 return mask;
3145 }
3146
3147 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3148 poll_table *wait)
3149 {
3150 struct sock *sk = sock->sk, *other;
3151 unsigned int writable;
3152 unsigned char state;
3153 __poll_t mask;
3154 u8 shutdown;
3155
3156 sock_poll_wait(file, sock, wait);
3157 mask = 0;
3158 shutdown = READ_ONCE(sk->sk_shutdown);
3159 state = READ_ONCE(sk->sk_state);
3160
3161 /* exceptional events? */
3162 if (READ_ONCE(sk->sk_err) ||
3163 !skb_queue_empty_lockless(&sk->sk_error_queue))
3164 mask |= EPOLLERR |
3165 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3166
3167 if (shutdown & RCV_SHUTDOWN)
3168 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3169 if (shutdown == SHUTDOWN_MASK)
3170 mask |= EPOLLHUP;
3171
3172 /* readable? */
3173 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3174 mask |= EPOLLIN | EPOLLRDNORM;
3175 if (sk_is_readable(sk))
3176 mask |= EPOLLIN | EPOLLRDNORM;
3177
3178 /* Connection-based sockets need to check for termination and startup */
3179 if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3180 mask |= EPOLLHUP;
3181
3182 /* No write status requested, avoid expensive OUT tests. */
3183 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3184 return mask;
3185
3186 writable = unix_writable(sk, state);
3187 if (writable) {
3188 unix_state_lock(sk);
3189
3190 other = unix_peer(sk);
3191 if (other && unix_peer(other) != sk &&
3192 unix_recvq_full_lockless(other) &&
3193 unix_dgram_peer_wake_me(sk, other))
3194 writable = 0;
3195
3196 unix_state_unlock(sk);
3197 }
3198
3199 if (writable)
3200 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3201 else
3202 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3203
3204 return mask;
3205 }
3206
3207 #ifdef CONFIG_PROC_FS
3208
3209 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3210
3211 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3212 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3213 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
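
/* Worked example of the encoding above, with B = BUCKET_SPACE:
 *
 *	pos = set_bucket_offset(3, 5) == (3UL << B) | 5
 *	get_bucket(pos) == 3, get_offset(pos) == 5
 *
 * i.e. one loff_t packs (hash bucket, 1-based position in bucket).
 */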
3214
3215 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3216 {
3217 unsigned long offset = get_offset(*pos);
3218 unsigned long bucket = get_bucket(*pos);
3219 unsigned long count = 0;
3220 struct sock *sk;
3221
3222 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3223 sk; sk = sk_next(sk)) {
3224 if (++count == offset)
3225 break;
3226 }
3227
3228 return sk;
3229 }
3230
3231 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3232 {
3233 unsigned long bucket = get_bucket(*pos);
3234 struct net *net = seq_file_net(seq);
3235 struct sock *sk;
3236
3237 while (bucket < UNIX_HASH_SIZE) {
3238 spin_lock(&net->unx.table.locks[bucket]);
3239
3240 sk = unix_from_bucket(seq, pos);
3241 if (sk)
3242 return sk;
3243
3244 spin_unlock(&net->unx.table.locks[bucket]);
3245
3246 *pos = set_bucket_offset(++bucket, 1);
3247 }
3248
3249 return NULL;
3250 }
3251
3252 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3253 loff_t *pos)
3254 {
3255 unsigned long bucket = get_bucket(*pos);
3256
3257 sk = sk_next(sk);
3258 if (sk)
3259 return sk;
3260
3262 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3263
3264 *pos = set_bucket_offset(++bucket, 1);
3265
3266 return unix_get_first(seq, pos);
3267 }
3268
3269 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3270 {
3271 if (!*pos)
3272 return SEQ_START_TOKEN;
3273
3274 return unix_get_first(seq, pos);
3275 }
3276
3277 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3278 {
3279 ++*pos;
3280
3281 if (v == SEQ_START_TOKEN)
3282 return unix_get_first(seq, pos);
3283
3284 return unix_get_next(seq, v, pos);
3285 }
3286
3287 static void unix_seq_stop(struct seq_file *seq, void *v)
3288 {
3289 struct sock *sk = v;
3290
3291 if (sk)
3292 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3293 }
3294
3295 static int unix_seq_show(struct seq_file *seq, void *v)
3296 {
3297
3298 if (v == SEQ_START_TOKEN)
3299 seq_puts(seq, "Num RefCount Protocol Flags Type St "
3300 "Inode Path\n");
3301 else {
3302 struct sock *s = v;
3303 struct unix_sock *u = unix_sk(s);
3304 unix_state_lock(s);
3305
3306 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3307 s,
3308 refcount_read(&s->sk_refcnt),
3309 0,
3310 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3311 s->sk_type,
3312 s->sk_socket ?
3313 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3314 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3315 sock_i_ino(s));
3316
3317 if (u->addr) { /* under a hash table lock here */
3318 int i, len;
3319 seq_putc(seq, ' ');
3320
3321 i = 0;
3322 len = u->addr->len -
3323 offsetof(struct sockaddr_un, sun_path);
3324 if (u->addr->name->sun_path[0]) {
3325 len--;
3326 } else {
3327 seq_putc(seq, '@');
3328 i++;
3329 }
3330 for ( ; i < len; i++)
3331 seq_putc(seq, u->addr->name->sun_path[i] ?:
3332 '@');
3333 }
3334 unix_state_unlock(s);
3335 seq_putc(seq, '\n');
3336 }
3337
3338 return 0;
3339 }
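
/*
 * The resulting /proc/net/unix line looks roughly like (illustrative
 * values only):
 *
 *	0000000000000000: 00000002 00000000 00010000 0001 01 23456 /run/foo
 *
 * i.e. hashed kernel pointer (%pK), refcount, protocol (always 0),
 * flags (__SO_ACCEPTCON for listeners), type, state, inode and the
 * bound path, with '@' standing in for NULs in abstract names.
 */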
3340
3341 static const struct seq_operations unix_seq_ops = {
3342 .start = unix_seq_start,
3343 .next = unix_seq_next,
3344 .stop = unix_seq_stop,
3345 .show = unix_seq_show,
3346 };
3347
3348 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
3349 struct bpf_unix_iter_state {
3350 struct seq_net_private p;
3351 unsigned int cur_sk;
3352 unsigned int end_sk;
3353 unsigned int max_sk;
3354 struct sock **batch;
3355 bool st_bucket_done;
3356 };
3357
3358 struct bpf_iter__unix {
3359 __bpf_md_ptr(struct bpf_iter_meta *, meta);
3360 __bpf_md_ptr(struct unix_sock *, unix_sk);
3361 uid_t uid __aligned(8);
3362 };
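
/*
 * A hedged sketch of a BPF program attaching to this iterator
 * (illustrative; context layout as defined above, helpers from the
 * usual libbpf headers):
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *		struct seq_file *seq = ctx->meta->seq;
 *
 *		if (!unix_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 */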
3363
3364 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3365 struct unix_sock *unix_sk, uid_t uid)
3366 {
3367 struct bpf_iter__unix ctx;
3368
3369 meta->seq_num--; /* skip SEQ_START_TOKEN */
3370 ctx.meta = meta;
3371 ctx.unix_sk = unix_sk;
3372 ctx.uid = uid;
3373 return bpf_iter_run_prog(prog, &ctx);
3374 }
3375
3376 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3378 {
3379 struct bpf_unix_iter_state *iter = seq->private;
3380 unsigned int expected = 1;
3381 struct sock *sk;
3382
3383 sock_hold(start_sk);
3384 iter->batch[iter->end_sk++] = start_sk;
3385
3386 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3387 if (iter->end_sk < iter->max_sk) {
3388 sock_hold(sk);
3389 iter->batch[iter->end_sk++] = sk;
3390 }
3391
3392 expected++;
3393 }
3394
3395 spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3396
3397 return expected;
3398 }
3399
3400 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3401 {
3402 while (iter->cur_sk < iter->end_sk)
3403 sock_put(iter->batch[iter->cur_sk++]);
3404 }
3405
3406 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3407 unsigned int new_batch_sz)
3408 {
3409 struct sock **new_batch;
3410
3411 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3412 GFP_USER | __GFP_NOWARN);
3413 if (!new_batch)
3414 return -ENOMEM;
3415
3416 bpf_iter_unix_put_batch(iter);
3417 kvfree(iter->batch);
3418 iter->batch = new_batch;
3419 iter->max_sk = new_batch_sz;
3420
3421 return 0;
3422 }
3423
3424 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3425 loff_t *pos)
3426 {
3427 struct bpf_unix_iter_state *iter = seq->private;
3428 unsigned int expected;
3429 bool resized = false;
3430 struct sock *sk;
3431
3432 if (iter->st_bucket_done)
3433 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3434
3435 again:
3436 /* Get a new batch */
3437 iter->cur_sk = 0;
3438 iter->end_sk = 0;
3439
3440 sk = unix_get_first(seq, pos);
3441 if (!sk)
3442 return NULL; /* Done */
3443
3444 expected = bpf_iter_unix_hold_batch(seq, sk);
3445
3446 if (iter->end_sk == expected) {
3447 iter->st_bucket_done = true;
3448 return sk;
3449 }
3450
3451 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3452 resized = true;
3453 goto again;
3454 }
3455
3456 return sk;
3457 }
3458
3459 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3460 {
3461 if (!*pos)
3462 return SEQ_START_TOKEN;
3463
3464 /* bpf iter does not support lseek, so it always
3465 * continues from where it was stop()-ped.
3466 */
3467 return bpf_iter_unix_batch(seq, pos);
3468 }
3469
3470 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3471 {
3472 struct bpf_unix_iter_state *iter = seq->private;
3473 struct sock *sk;
3474
3475 /* Whenever seq_next() is called, the iter->cur_sk is
3476 * done with seq_show(), so advance to the next sk in
3477 * the batch.
3478 */
3479 if (iter->cur_sk < iter->end_sk)
3480 sock_put(iter->batch[iter->cur_sk++]);
3481
3482 ++*pos;
3483
3484 if (iter->cur_sk < iter->end_sk)
3485 sk = iter->batch[iter->cur_sk];
3486 else
3487 sk = bpf_iter_unix_batch(seq, pos);
3488
3489 return sk;
3490 }
3491
3492 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3493 {
3494 struct bpf_iter_meta meta;
3495 struct bpf_prog *prog;
3496 struct sock *sk = v;
3497 uid_t uid;
3498 bool slow;
3499 int ret;
3500
3501 if (v == SEQ_START_TOKEN)
3502 return 0;
3503
3504 slow = lock_sock_fast(sk);
3505
3506 if (unlikely(sk_unhashed(sk))) {
3507 ret = SEQ_SKIP;
3508 goto unlock;
3509 }
3510
3511 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3512 meta.seq = seq;
3513 prog = bpf_iter_get_info(&meta, false);
3514 ret = unix_prog_seq_show(prog, &meta, v, uid);
3515 unlock:
3516 unlock_sock_fast(sk, slow);
3517 return ret;
3518 }
3519
3520 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3521 {
3522 struct bpf_unix_iter_state *iter = seq->private;
3523 struct bpf_iter_meta meta;
3524 struct bpf_prog *prog;
3525
3526 if (!v) {
3527 meta.seq = seq;
3528 prog = bpf_iter_get_info(&meta, true);
3529 if (prog)
3530 (void)unix_prog_seq_show(prog, &meta, v, 0);
3531 }
3532
3533 if (iter->cur_sk < iter->end_sk)
3534 bpf_iter_unix_put_batch(iter);
3535 }
3536
3537 static const struct seq_operations bpf_iter_unix_seq_ops = {
3538 .start = bpf_iter_unix_seq_start,
3539 .next = bpf_iter_unix_seq_next,
3540 .stop = bpf_iter_unix_seq_stop,
3541 .show = bpf_iter_unix_seq_show,
3542 };
3543 #endif
3544 #endif
3545
3546 static const struct net_proto_family unix_family_ops = {
3547 .family = PF_UNIX,
3548 .create = unix_create,
3549 .owner = THIS_MODULE,
3550 };
3551
3552
3553 static int __net_init unix_net_init(struct net *net)
3554 {
3555 int i;
3556
3557 net->unx.sysctl_max_dgram_qlen = 10;
3558 if (unix_sysctl_register(net))
3559 goto out;
3560
3561 #ifdef CONFIG_PROC_FS
3562 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3563 sizeof(struct seq_net_private)))
3564 goto err_sysctl;
3565 #endif
3566
3567 net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3568 sizeof(spinlock_t), GFP_KERNEL);
3569 if (!net->unx.table.locks)
3570 goto err_proc;
3571
3572 net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3573 sizeof(struct hlist_head),
3574 GFP_KERNEL);
3575 if (!net->unx.table.buckets)
3576 goto free_locks;
3577
3578 for (i = 0; i < UNIX_HASH_SIZE; i++) {
3579 spin_lock_init(&net->unx.table.locks[i]);
3580 INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3581 }
3582
3583 return 0;
3584
3585 free_locks:
3586 kvfree(net->unx.table.locks);
3587 err_proc:
3588 #ifdef CONFIG_PROC_FS
3589 remove_proc_entry("unix", net->proc_net);
3590 err_sysctl:
3591 #endif
3592 unix_sysctl_unregister(net);
3593 out:
3594 return -ENOMEM;
3595 }
3596
3597 static void __net_exit unix_net_exit(struct net *net)
3598 {
3599 kvfree(net->unx.table.buckets);
3600 kvfree(net->unx.table.locks);
3601 unix_sysctl_unregister(net);
3602 remove_proc_entry("unix", net->proc_net);
3603 }
3604
3605 static struct pernet_operations unix_net_ops = {
3606 .init = unix_net_init,
3607 .exit = unix_net_exit,
3608 };
3609
3610 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3611 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3612 struct unix_sock *unix_sk, uid_t uid)
3613
3614 #define INIT_BATCH_SZ 16
3615
3616 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3617 {
3618 struct bpf_unix_iter_state *iter = priv_data;
3619 int err;
3620
3621 err = bpf_iter_init_seq_net(priv_data, aux);
3622 if (err)
3623 return err;
3624
3625 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3626 if (err) {
3627 bpf_iter_fini_seq_net(priv_data);
3628 return err;
3629 }
3630
3631 return 0;
3632 }
3633
3634 static void bpf_iter_fini_unix(void *priv_data)
3635 {
3636 struct bpf_unix_iter_state *iter = priv_data;
3637
3638 bpf_iter_fini_seq_net(priv_data);
3639 kvfree(iter->batch);
3640 }
3641
3642 static const struct bpf_iter_seq_info unix_seq_info = {
3643 .seq_ops = &bpf_iter_unix_seq_ops,
3644 .init_seq_private = bpf_iter_init_unix,
3645 .fini_seq_private = bpf_iter_fini_unix,
3646 .seq_priv_size = sizeof(struct bpf_unix_iter_state),
3647 };
3648
3649 static const struct bpf_func_proto *
3650 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3651 const struct bpf_prog *prog)
3652 {
3653 switch (func_id) {
3654 case BPF_FUNC_setsockopt:
3655 return &bpf_sk_setsockopt_proto;
3656 case BPF_FUNC_getsockopt:
3657 return &bpf_sk_getsockopt_proto;
3658 default:
3659 return NULL;
3660 }
3661 }
3662
3663 static struct bpf_iter_reg unix_reg_info = {
3664 .target = "unix",
3665 .ctx_arg_info_size = 1,
3666 .ctx_arg_info = {
3667 { offsetof(struct bpf_iter__unix, unix_sk),
3668 PTR_TO_BTF_ID_OR_NULL },
3669 },
3670 .get_func_proto = bpf_iter_unix_get_func_proto,
3671 .seq_info = &unix_seq_info,
3672 };
3673
3674 static void __init bpf_iter_register(void)
3675 {
3676 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3677 if (bpf_iter_reg_target(&unix_reg_info))
3678 pr_warn("Warning: could not register bpf iterator unix\n");
3679 }
3680 #endif
3681
3682 static int __init af_unix_init(void)
3683 {
3684 int i, rc = -1;
3685
3686 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3687
3688 for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3689 spin_lock_init(&bsd_socket_locks[i]);
3690 INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3691 }
3692
3693 rc = proto_register(&unix_dgram_proto, 1);
3694 if (rc != 0) {
3695 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3696 goto out;
3697 }
3698
3699 rc = proto_register(&unix_stream_proto, 1);
3700 if (rc != 0) {
3701 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3702 proto_unregister(&unix_dgram_proto);
3703 goto out;
3704 }
3705
3706 sock_register(&unix_family_ops);
3707 register_pernet_subsys(&unix_net_ops);
3708 unix_bpf_build_proto();
3709
3710 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3711 bpf_iter_register();
3712 #endif
3713
3714 out:
3715 return rc;
3716 }
3717
3718 static void __exit af_unix_exit(void)
3719 {
3720 sock_unregister(PF_UNIX);
3721 proto_unregister(&unix_dgram_proto);
3722 proto_unregister(&unix_stream_proto);
3723 unregister_pernet_subsys(&unix_net_ops);
3724 }
3725
3726 /* Earlier than device_initcall() so that other drivers invoking
3727 request_module() don't end up in a loop when modprobe tries
3728 to use a UNIX socket. But later than subsys_initcall() because
3729 we depend on stuff initialised there */
3730 fs_initcall(af_unix_init);
3731 module_exit(af_unix_exit);
3732
3733 MODULE_LICENSE("GPL");
3734 MODULE_ALIAS_NETPROTO(PF_UNIX);
3735