/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 *          Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

static int pn_socket_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (sk) {
                sock->sk = NULL;
                sk->sk_prot->close(sk, 0);
        }
        return 0;
}

#define PN_HASHSIZE     16
#define PN_HASHMASK     (PN_HASHSIZE-1)

static struct {
        struct hlist_head hlist[PN_HASHSIZE];
        struct mutex lock;
} pnsocks;

void __init pn_sock_init(void)
{
        unsigned int i;

        for (i = 0; i < PN_HASHSIZE; i++)
                INIT_HLIST_HEAD(pnsocks.hlist + i);
        mutex_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
        return pnsocks.hlist + (obj & PN_HASHMASK);
}
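
/*
 * Bound sockets live in the pnsocks hash table above: the low-order bits
 * of the bound object (i.e. of the port) select the bucket.  Insertion
 * and removal are serialized by pnsocks.lock, while the lookup paths
 * below only rely on rcu_read_lock().
 */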

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
        struct sock *sknode;
        struct sock *rval = NULL;
        u16 obj = pn_sockaddr_get_object(spn);
        u8 res = spn->spn_resource;
        struct hlist_head *hlist = pn_hash_list(obj);

        rcu_read_lock();
        sk_for_each_rcu(sknode, hlist) {
                struct pn_sock *pn = pn_sk(sknode);
                BUG_ON(!pn->sobject); /* unbound socket */

                if (!net_eq(sock_net(sknode), net))
                        continue;
                if (pn_port(obj)) {
                        /* Look up socket by port */
                        if (pn_port(pn->sobject) != pn_port(obj))
                                continue;
                } else {
                        /* If port is zero, look up by resource */
                        if (pn->resource != res)
                                continue;
                }
                if (pn_addr(pn->sobject) &&
                    pn_addr(pn->sobject) != pn_addr(obj))
                        continue;

                rval = sknode;
                sock_hold(sknode);
                break;
        }
        rcu_read_unlock();

        return rval;
}

/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
        struct hlist_head *hlist = pnsocks.hlist;
        unsigned int h;

        rcu_read_lock();
        for (h = 0; h < PN_HASHSIZE; h++) {
                struct sock *sknode;

                sk_for_each(sknode, hlist) {
                        struct sk_buff *clone;

                        if (!net_eq(sock_net(sknode), net))
                                continue;
                        if (!sock_flag(sknode, SOCK_BROADCAST))
                                continue;

                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (clone) {
                                sock_hold(sknode);
                                sk_receive_skb(sknode, clone, 0);
                        }
                }
                hlist++;
        }
        rcu_read_unlock();
}

void pn_sock_hash(struct sock *sk)
{
        struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

        mutex_lock(&pnsocks.lock);
        sk_add_node_rcu(sk, hlist);
        mutex_unlock(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
        mutex_lock(&pnsocks.lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&pnsocks.lock);
        pn_sock_unbind_all_res(sk);
        synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
        int err;
        u16 handle;
        u8 saddr;

        if (sk->sk_prot->bind)
                return sk->sk_prot->bind(sk, addr, len);

        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
                return -EAFNOSUPPORT;

        handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
        saddr = pn_addr(handle);
        if (saddr && phonet_address_lookup(sock_net(sk), saddr))
                return -EADDRNOTAVAIL;

        lock_sock(sk);
        if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
                err = -EINVAL; /* attempt to rebind */
                goto out;
        }
        WARN_ON(sk_hashed(sk));
        mutex_lock(&port_mutex);
        err = sk->sk_prot->get_port(sk, pn_port(handle));
        if (err)
                goto out_port;

        /* get_port() sets the port, bind() sets the address if applicable */
        pn->sobject = pn_object(saddr, pn_port(pn->sobject));
        pn->resource = spn->spn_resource;

        /* Enable RX on the socket */
        sk->sk_prot->hash(sk);
out_port:
        mutex_unlock(&port_mutex);
out:
        release_sock(sk);
        return err;
}
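
/*
 * Userland-side sketch of the bind() path above (illustrative only, not
 * part of this file): binding with a zero port lets get_port() pick a
 * free one, much like pn_socket_autobind() below does from kernel code.
 *
 *      int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);
 *      struct sockaddr_pn spn = { .spn_family = AF_PHONET };
 *
 *      bind(fd, (struct sockaddr *)&spn, sizeof(spn));
 */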

static int pn_socket_autobind(struct socket *sock)
{
        struct sockaddr_pn sa;
        int err;

        memset(&sa, 0, sizeof(sa));
        sa.spn_family = AF_PHONET;
        err = pn_socket_bind(sock, (struct sockaddr *)&sa,
                                sizeof(struct sockaddr_pn));
        if (err != -EINVAL)
                return err;
        BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
        return 0; /* socket was already bound */
}

static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
                int len, int flags)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
        struct task_struct *tsk = current;
        long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        int err;

        if (pn_socket_autobind(sock))
                return -ENOBUFS;
        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
                return -EAFNOSUPPORT;

        lock_sock(sk);

        switch (sock->state) {
        case SS_UNCONNECTED:
                if (sk->sk_state != TCP_CLOSE) {
                        err = -EISCONN;
                        goto out;
                }
                break;
        case SS_CONNECTING:
                err = -EALREADY;
                goto out;
        default:
                err = -EISCONN;
                goto out;
        }

        pn->dobject = pn_sockaddr_get_object(spn);
        pn->resource = pn_sockaddr_get_resource(spn);
        sock->state = SS_CONNECTING;

        err = sk->sk_prot->connect(sk, addr, len);
        if (err) {
                sock->state = SS_UNCONNECTED;
                pn->dobject = 0;
                goto out;
        }

        while (sk->sk_state == TCP_SYN_SENT) {
                DEFINE_WAIT(wait);

                if (!timeo) {
                        err = -EINPROGRESS;
                        goto out;
                }
                if (signal_pending(tsk)) {
                        err = sock_intr_errno(timeo);
                        goto out;
                }

                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                                TASK_INTERRUPTIBLE);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                finish_wait(sk_sleep(sk), &wait);
        }

        if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
                err = 0;
        else if (sk->sk_state == TCP_CLOSE_WAIT)
                err = -ECONNRESET;
        else
                err = -ECONNREFUSED;
        sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
        release_sock(sk);
        return err;
}

static int pn_socket_accept(struct socket *sock, struct socket *newsock,
                                int flags)
{
        struct sock *sk = sock->sk;
        struct sock *newsk;
        int err;

        if (unlikely(sk->sk_state != TCP_LISTEN))
                return -EINVAL;

        newsk = sk->sk_prot->accept(sk, flags, &err);
        if (!newsk)
                return err;

        lock_sock(newsk);
        sock_graft(newsk, newsock);
        newsock->state = SS_CONNECTED;
        release_sock(newsk);
        return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
                                int *sockaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);

        memset(addr, 0, sizeof(struct sockaddr_pn));
        addr->sa_family = AF_PHONET;
        if (!peer) /* Race with bind() here is userland's problem. */
                pn_sockaddr_set_object((struct sockaddr_pn *)addr,
                        pn->sobject);

        *sockaddr_len = sizeof(struct sockaddr_pn);
        return 0;
}

static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
                                        poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        unsigned int mask = 0;

        poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_state == TCP_CLOSE)
                return POLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        if (!skb_queue_empty(&pn->ctrlreq_queue))
                mask |= POLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                return POLLHUP;

        if (sk->sk_state == TCP_ESTABLISHED &&
                atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
                atomic_read(&pn->tx_credits))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
                                unsigned long arg)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);

        if (cmd == SIOCPNGETOBJECT) {
                struct net_device *dev;
                u16 handle;
                u8 saddr;

                if (get_user(handle, (__u16 __user *)arg))
                        return -EFAULT;

                lock_sock(sk);
                if (sk->sk_bound_dev_if)
                        dev = dev_get_by_index(sock_net(sk),
                                                sk->sk_bound_dev_if);
                else
                        dev = phonet_device_get(sock_net(sk));
                if (dev && (dev->flags & IFF_UP))
                        saddr = phonet_address_get(dev, pn_addr(handle));
                else
                        saddr = PN_NO_ADDR;
                release_sock(sk);

                if (dev)
                        dev_put(dev);
                if (saddr == PN_NO_ADDR)
                        return -EHOSTUNREACH;

                handle = pn_object(saddr, pn_port(pn->sobject));
                return put_user(handle, (__u16 __user *)arg);
        }

        return sk->sk_prot->ioctl(sk, cmd, arg);
}

static int pn_socket_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (pn_socket_autobind(sock))
                return -ENOBUFS;

        lock_sock(sk);
        if (sock->state != SS_UNCONNECTED) {
                err = -EINVAL;
                goto out;
        }

        if (sk->sk_state != TCP_LISTEN) {
                sk->sk_state = TCP_LISTEN;
                sk->sk_ack_backlog = 0;
        }
        sk->sk_max_ack_backlog = backlog;
out:
        release_sock(sk);
        return err;
}

static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *m, size_t total_len)
{
        struct sock *sk = sock->sk;

        if (pn_socket_autobind(sock))
                return -EAGAIN;

        return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
        .family         = AF_PHONET,
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
        .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = sock_no_setsockopt,
        .compat_getsockopt = sock_no_getsockopt,
#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
        .family         = AF_PHONET,
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
        .connect        = pn_socket_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
        .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
        static int port_cur;
        struct net *net = sock_net(sk);
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn try_sa;
        struct sock *tmpsk;

        memset(&try_sa, 0, sizeof(struct sockaddr_pn));
        try_sa.spn_family = AF_PHONET;
        WARN_ON(!mutex_is_locked(&port_mutex));
        if (!sport) {
                /* search free port */
                int port, pmin, pmax;

                phonet_get_local_port_range(&pmin, &pmax);
                for (port = pmin; port <= pmax; port++) {
                        port_cur++;
                        if (port_cur < pmin || port_cur > pmax)
                                port_cur = pmin;

                        pn_sockaddr_set_port(&try_sa, port_cur);
                        tmpsk = pn_find_sock_by_sa(net, &try_sa);
                        if (tmpsk == NULL) {
                                sport = port_cur;
                                goto found;
                        } else
                                sock_put(tmpsk);
                }
        } else {
                /* try to find specific port */
                pn_sockaddr_set_port(&try_sa, sport);
                tmpsk = pn_find_sock_by_sa(net, &try_sa);
                if (tmpsk == NULL)
                        /* No sock there! We can use that port... */
                        goto found;
                else
                        sock_put(tmpsk);
        }
        /* the port must be in use already */
        return -EADDRINUSE;

found:
        pn->sobject = pn_object(pn_addr(pn->sobject), sport);
        return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
        struct net *net = seq_file_net(seq);
        struct hlist_head *hlist = pnsocks.hlist;
        struct sock *sknode;
        unsigned int h;

        for (h = 0; h < PN_HASHSIZE; h++) {
                sk_for_each_rcu(sknode, hlist) {
                        if (!net_eq(net, sock_net(sknode)))
                                continue;
                        if (!pos)
                                return sknode;
                        pos--;
                }
                hlist++;
        }
        return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
        struct net *net = seq_file_net(seq);

        do
                sk = sk_next(sk);
        while (sk && !net_eq(net, sock_net(sk)));

        return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(rcu)
{
        rcu_read_lock();
        return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = pn_sock_get_idx(seq, 0);
        else
                sk = pn_sock_get_next(seq, v);
        (*pos)++;
        return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
        int len;

        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%s%n", "pt  loc  rem rs st tx_queue rx_queue "
                        "  uid inode ref pointer drops", &len);
        else {
                struct sock *sk = v;
                struct pn_sock *pn = pn_sk(sk);

                seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
                        "%d %pK %d%n",
                        sk->sk_protocol, pn->sobject, pn->dobject,
                        pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
                        from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                        sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
                        atomic_read(&sk->sk_drops), &len);
        }
        seq_printf(seq, "%*s\n", 127 - len, "");
        return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
        .start = pn_sock_seq_start,
        .next = pn_sock_seq_next,
        .stop = pn_sock_seq_stop,
        .show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &pn_sock_seq_ops,
                                sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
        .owner = THIS_MODULE,
        .open = pn_sock_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif

static struct {
        struct sock *sk[256];
} pnres;
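
/*
 * pnres maps each of the 256 Phonet resource numbers to at most one bound
 * socket, in the initial network namespace only.  Updates are serialized
 * by resource_mutex (below); lookups are RCU-protected, which is why
 * unbinding synchronizes RCU before dropping the socket reference.
 */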

/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
        struct sock *sk;

        if (!net_eq(net, &init_net))
                return NULL;

        rcu_read_lock();
        sk = rcu_dereference(pnres.sk[res]);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
        return sk;
}

static DEFINE_MUTEX(resource_mutex);

int pn_sock_bind_res(struct sock *sk, u8 res)
{
        int ret = -EADDRINUSE;

        if (!net_eq(sock_net(sk), &init_net))
                return -ENOIOCTLCMD;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (pn_socket_autobind(sk->sk_socket))
                return -EAGAIN;

        mutex_lock(&resource_mutex);
        if (pnres.sk[res] == NULL) {
                sock_hold(sk);
                rcu_assign_pointer(pnres.sk[res], sk);
                ret = 0;
        }
        mutex_unlock(&resource_mutex);
        return ret;
}

int pn_sock_unbind_res(struct sock *sk, u8 res)
{
        int ret = -ENOENT;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        mutex_lock(&resource_mutex);
        if (pnres.sk[res] == sk) {
                RCU_INIT_POINTER(pnres.sk[res], NULL);
                ret = 0;
        }
        mutex_unlock(&resource_mutex);

        if (ret == 0) {
                synchronize_rcu();
                sock_put(sk);
        }
        return ret;
}

void pn_sock_unbind_all_res(struct sock *sk)
{
        unsigned int res, match = 0;

        mutex_lock(&resource_mutex);
        for (res = 0; res < 256; res++) {
                if (pnres.sk[res] == sk) {
                        RCU_INIT_POINTER(pnres.sk[res], NULL);
                        match++;
                }
        }
        mutex_unlock(&resource_mutex);

        while (match > 0) {
                __sock_put(sk);
                match--;
        }
        /* Caller is responsible for RCU sync before final sock_put() */
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
        struct net *net = seq_file_net(seq);
        unsigned int i;

        if (!net_eq(net, &init_net))
                return NULL;

        for (i = 0; i < 256; i++) {
                if (pnres.sk[i] == NULL)
                        continue;
                if (!pos)
                        return pnres.sk + i;
                pos--;
        }
        return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
        struct net *net = seq_file_net(seq);
        unsigned int i;

        BUG_ON(!net_eq(net, &init_net));

        for (i = (sk - pnres.sk) + 1; i < 256; i++)
                if (pnres.sk[i])
                        return pnres.sk + i;
        return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(resource_mutex)
{
        mutex_lock(&resource_mutex);
        return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock **sk;

        if (v == SEQ_START_TOKEN)
                sk = pn_res_get_idx(seq, 0);
        else
                sk = pn_res_get_next(seq, v);
        (*pos)++;
        return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
        __releases(resource_mutex)
{
        mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
        int len;

        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%s%n", "rs   uid inode", &len);
        else {
                struct sock **psk = v;
                struct sock *sk = *psk;

                seq_printf(seq, "%02X %5u %lu%n",
                           (int) (psk - pnres.sk),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk), &len);
        }
        seq_printf(seq, "%*s\n", 63 - len, "");
        return 0;
}

static const struct seq_operations pn_res_seq_ops = {
        .start = pn_res_seq_start,
        .next = pn_res_seq_next,
        .stop = pn_res_seq_stop,
        .show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &pn_res_seq_ops,
                                sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
        .owner = THIS_MODULE,
        .open = pn_res_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif