/* xref: /openbmc/linux/net/phonet/socket.c (revision 4800cd83) */
/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <linux/phonet.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

static int pn_socket_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
	}
	return 0;
}

#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE-1)

/* All bound Phonet sockets, hashed by the low bits of their object */
static struct  {
	struct hlist_head hlist[PN_HASHSIZE];
	spinlock_t lock;
} pnsocks;

void __init pn_sock_init(void)
{
	unsigned i;

	for (i = 0; i < PN_HASHSIZE; i++)
		INIT_HLIST_HEAD(pnsocks.hlist + i);
	spin_lock_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
	return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct hlist_node *node;
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	spin_lock_bh(&pnsocks.lock);

	sk_for_each(sknode, node, hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}

	spin_unlock_bh(&pnsocks.lock);

	return rval;
}
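
/*
 * Illustrative caller sketch (a minimal sketch, not part of the original
 * file; the function name is hypothetical): the socket returned by
 * pn_find_sock_by_sa() is held, so the caller drops the reference with
 * sock_put() when done.
 */
static inline void pn_sa_lookup_example(struct net *net,
					const struct sockaddr_pn *spn)
{
	struct sock *sk = pn_find_sock_by_sa(net, spn);

	if (sk == NULL)
		return;		/* no matching bound socket */
	/* ... use the socket, e.g. queue a packet to it ... */
	sock_put(sk);		/* release the reference taken by the lookup */
}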

/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
	struct hlist_head *hlist = pnsocks.hlist;
	unsigned h;

	spin_lock(&pnsocks.lock);
	for (h = 0; h < PN_HASHSIZE; h++) {
		struct hlist_node *node;
		struct sock *sknode;

		sk_for_each(sknode, node, hlist) {
			struct sk_buff *clone;

			if (!net_eq(sock_net(sknode), net))
				continue;
			if (!sock_flag(sknode, SOCK_BROADCAST))
				continue;

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone) {
				sock_hold(sknode);
				sk_receive_skb(sknode, clone, 0);
			}
		}
		hlist++;
	}
	spin_unlock(&pnsocks.lock);
}

void pn_sock_hash(struct sock *sk)
{
	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

	spin_lock_bh(&pnsocks.lock);
	sk_add_node(sk, hlist);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}
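
/*
 * Illustrative userspace usage of the bind path above; a minimal sketch,
 * assuming the sockaddr_pn layout from <linux/phonet.h> and a hypothetical
 * resource number 0x42:
 *
 *	struct sockaddr_pn spn;
 *	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);
 *
 *	memset(&spn, 0, sizeof(spn));
 *	spn.spn_family = AF_PHONET;
 *	spn.spn_resource = 0x42;
 *	bind(fd, (struct sockaddr *)&spn, sizeof(spn));
 *
 * Leaving the port part of the object at zero lets get_port() pick a free
 * port, as pn_socket_autobind() below relies on.
 */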

static int pn_socket_autobind(struct socket *sock)
{
	struct sockaddr_pn sa;
	int err;

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;
	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
				sizeof(struct sockaddr_pn));
	if (err != -EINVAL)
		return err;
	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
	return 0; /* socket was already bound */
}

#ifdef CONFIG_PHONET_PIPECTRLR
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	long timeo;
	int err;

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		sk->sk_state = TCP_CLOSE;
		break;
	case SS_CONNECTING:
		switch (sk->sk_state) {
		case TCP_SYN_RECV:
			sock->state = SS_CONNECTED;
			err = -EISCONN;
			goto out;
		case TCP_CLOSE:
			err = -EALREADY;
			if (flags & O_NONBLOCK)
				goto out;
			goto wait_connect;
		}
		break;
	case SS_CONNECTED:
		switch (sk->sk_state) {
		case TCP_SYN_RECV:
			err = -EISCONN;
			goto out;
		case TCP_CLOSE:
			sock->state = SS_UNCONNECTED;
			break;
		}
		break;
	case SS_DISCONNECTING:
	case SS_FREE:
		break;
	}
	sk->sk_state = TCP_CLOSE;
	sk_stream_kill_queues(sk);

	sock->state = SS_CONNECTING;
	err = sk->sk_prot->connect(sk, addr, len);
	if (err < 0) {
		sock->state = SS_UNCONNECTED;
		sk->sk_state = TCP_CLOSE;
		goto out;
	}

	err = -EINPROGRESS;
wait_connect:
	if (sk->sk_state != TCP_SYN_RECV && (flags & O_NONBLOCK))
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
	release_sock(sk);

	err = -ERESTARTSYS;
	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
			sk->sk_state != TCP_CLOSE,
			timeo);

	lock_sock(sk);
	if (timeo < 0)
		goto out; /* -ERESTARTSYS */

	err = -ETIMEDOUT;
	if (timeo == 0 && sk->sk_state != TCP_SYN_RECV)
		goto out;

	if (sk->sk_state != TCP_SYN_RECV) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);
		if (!err)
			err = -ECONNREFUSED;
		goto out;
	}
	sock->state = SS_CONNECTED;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif

static int pn_socket_accept(struct socket *sock, struct socket *newsock,
				int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	int err;

	newsk = sk->sk_prot->accept(sk, flags, &err);
	if (!newsk)
		return err;

	lock_sock(newsk);
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
	release_sock(newsk);
	return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
				int *sockaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	memset(addr, 0, sizeof(struct sockaddr_pn));
	addr->sa_family = AF_PHONET;
	if (!peer) /* Race with bind() here is userland's problem. */
		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
					pn->sobject);

	*sockaddr_len = sizeof(struct sockaddr_pn);
	return 0;
}

static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
					poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	unsigned int mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		return hlist_empty(&pn->ackq) ? 0 : POLLIN;
	case TCP_CLOSE:
		return POLLERR;
	}

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	if (!skb_queue_empty(&pn->ctrlreq_queue))
		mask |= POLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return POLLHUP;

	if (sk->sk_state == TCP_ESTABLISHED &&
		atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
		atomic_read(&pn->tx_credits))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}
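
/*
 * Illustrative userspace usage of SIOCPNGETOBJECT; a minimal sketch assuming
 * <linux/phonet.h> and <sys/ioctl.h>.  The address part of the handle passed
 * in is only a hint; the kernel writes back an object built from the local
 * Phonet address and the socket's own bound port:
 *
 *	__u16 handle = 0;
 *
 *	if (ioctl(fd, SIOCPNGETOBJECT, &handle) == 0)
 *		printf("local object: %04x\n", handle);
 */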

static int pn_socket_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (sock->state != SS_UNCONNECTED)
		return -EINVAL;
	if (pn_socket_autobind(sock))
		return -ENOBUFS;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_state = TCP_LISTEN;
	sk->sk_ack_backlog = 0;
	sk->sk_max_ack_backlog = backlog;
out:
	release_sock(sk);
	return err;
}

static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;

	if (pn_socket_autobind(sock))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
#ifdef CONFIG_PHONET_PIPECTRLR
	.connect	= pn_socket_connect,
#else
	.connect	= sock_no_connect,
#endif
	.socketpair	= sock_no_socketpair,
	.accept		= pn_socket_accept,
	.getname	= pn_socket_getname,
	.poll		= pn_socket_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= pn_socket_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

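/*
 * How the ops tables above are consumed (a minimal sketch modelled on
 * net/phonet/pep.c; the struct layout and registration call are assumed to
 * follow <net/phonet/phonet.h>): each Phonet protocol registers its
 * proto_ops together with its struct proto, e.g.
 *
 *	static struct phonet_protocol pep_pn_proto = {
 *		.ops		= &phonet_stream_ops,
 *		.prot		= &pep_proto,
 *		.sock_type	= SOCK_SEQPACKET,
 *	};
 *
 *	err = phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
 */
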
/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct hlist_node *node;
	struct sock *sknode;
	unsigned h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each(sknode, node, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(pnsocks.lock)
{
	spin_lock_bh(&pnsocks.lock);
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_sock_get_idx(seq, 0);
	else
		sk = pn_sock_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(pnsocks.lock)
{
	spin_unlock_bh(&pnsocks.lock);
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "pt  loc  rem rs st tx_queue rx_queue "
			"  uid inode ref pointer drops", &len);
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %p %d%n",
			sk->sk_protocol, pn->sobject, 0, pn->resource,
			sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			sock_i_uid(sk), sock_i_ino(sk),
			atomic_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops), &len);
	}
	seq_printf(seq, "%*s\n", 127 - len, "");
	return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_sock_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_sock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

static struct  {
	struct sock *sk[256];
} pnres;

/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return NULL;

	rcu_read_lock();
	sk = rcu_dereference(pnres.sk[res]);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	return sk;
}
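
/*
 * As with pn_find_sock_by_sa(), the socket returned above is held; callers
 * release it with sock_put() when done.  The RCU read lock only covers the
 * pnres.sk[] dereference itself.
 */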

static DEFINE_MUTEX(resource_mutex);

int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		sock_hold(sk);
		rcu_assign_pointer(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}

int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		rcu_assign_pointer(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}

void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			rcu_assign_pointer(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	if (match == 0)
		return;
	synchronize_rcu();
	while (match > 0) {
		sock_put(sk);
		match--;
	}
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	if (!net_eq(net, &init_net))
		return NULL;

	for (i = 0; i < 256; i++) {
		if (pnres.sk[i] == NULL)
			continue;
		if (!pos)
			return pnres.sk + i;
		pos--;
	}
	return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	BUG_ON(!net_eq(net, &init_net));

	for (i = (sk - pnres.sk) + 1; i < 256; i++)
		if (pnres.sk[i])
			return pnres.sk + i;
	return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock **sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_res_get_idx(seq, 0);
	else
		sk = pn_res_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "rs   uid inode", &len);
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		seq_printf(seq, "%02X %5d %lu%n",
			   (int) (psk - pnres.sk), sock_i_uid(sk),
			   sock_i_ino(sk), &len);
	}
	seq_printf(seq, "%*s\n", 63 - len, "");
	return 0;
}

static const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_res_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_res_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif