/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

static int pn_socket_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
	}
	return 0;
}

#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE-1)
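/*
 * Bound sockets are kept in a small hash table, keyed by the low-order bits
 * of the bound Phonet object.  Writers serialize on pnsocks.lock; lookups
 * walk the hash chains under RCU.
 */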
static struct {
	struct hlist_head hlist[PN_HASHSIZE];
	struct mutex lock;
} pnsocks;

void __init pn_sock_init(void)
{
	unsigned i;

	for (i = 0; i < PN_HASHSIZE; i++)
		INIT_HLIST_HEAD(pnsocks.hlist + i);
	mutex_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
	return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct hlist_node *node;
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	rcu_read_lock();
	sk_for_each_rcu(sknode, node, hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}
	rcu_read_unlock();

	return rval;
}

/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
	struct hlist_head *hlist = pnsocks.hlist;
	unsigned h;

	rcu_read_lock();
	for (h = 0; h < PN_HASHSIZE; h++) {
		struct hlist_node *node;
		struct sock *sknode;

		sk_for_each(sknode, node, hlist) {
			struct sk_buff *clone;

			if (!net_eq(sock_net(sknode), net))
				continue;
			if (!sock_flag(sknode, SOCK_BROADCAST))
				continue;

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone) {
				sock_hold(sknode);
				sk_receive_skb(sknode, clone, 0);
			}
		}
		hlist++;
	}
	rcu_read_unlock();
}

void pn_sock_hash(struct sock *sk)
{
	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

	mutex_lock(&pnsocks.lock);
	sk_add_node_rcu(sk, hlist);
	mutex_unlock(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
	mutex_lock(&pnsocks.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
	synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}
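/*
 * Illustrative userspace sketch (not part of this file): binding a Phonet
 * datagram socket.  Leaving the object at zero lets get_port() pick a free
 * port from the local port range.
 *
 *	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);
 *	struct sockaddr_pn spn = { .spn_family = AF_PHONET };
 *
 *	if (bind(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0)
 *		perror("bind");
 */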
static int pn_socket_autobind(struct socket *sock)
{
	struct sockaddr_pn sa;
	int err;

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;
	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
				sizeof(struct sockaddr_pn));
	if (err != -EINVAL)
		return err;
	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
	return 0; /* socket was already bound */
}

static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
				int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
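/*
 * Accept a pending connection on a listening pipe socket.  The protocol's
 * accept hook creates the new sock, which is then grafted onto the new
 * userspace socket.
 */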
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
				int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	int err;

	if (unlikely(sk->sk_state != TCP_LISTEN))
		return -EINVAL;

	newsk = sk->sk_prot->accept(sk, flags, &err);
	if (!newsk)
		return err;

	lock_sock(newsk);
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
	release_sock(newsk);
	return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
				int *sockaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	memset(addr, 0, sizeof(struct sockaddr_pn));
	addr->sa_family = AF_PHONET;
	if (!peer) /* Race with bind() here is userland's problem. */
		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
					pn->sobject);

	*sockaddr_len = sizeof(struct sockaddr_pn);
	return 0;
}

static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
					poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	unsigned int mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == TCP_CLOSE)
		return POLLERR;
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	if (!skb_queue_empty(&pn->ctrlreq_queue))
		mask |= POLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return POLLHUP;

	if (sk->sk_state == TCP_ESTABLISHED &&
	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
	    atomic_read(&pn->tx_credits))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}
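/*
 * Illustrative userspace sketch (not part of this file): SIOCPNGETOBJECT
 * reads a __u16 handle whose address part names the intended peer (zero
 * meaning the default Phonet device) and writes back the local object,
 * i.e. the source address plus this socket's bound port.
 *
 *	__u16 handle = 0;
 *
 *	if (ioctl(fd, SIOCPNGETOBJECT, &handle) == 0)
 *		printf("local object %#x\n", handle);
 */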
static int pn_socket_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;

	lock_sock(sk);
	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		sk->sk_state = TCP_LISTEN;
		sk->sk_ack_backlog = 0;
	}
	sk->sk_max_ack_backlog = backlog;
out:
	release_sock(sk);
	return err;
}

static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;

	if (pn_socket_autobind(sock))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= pn_socket_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= pn_socket_accept,
	.getname	= pn_socket_getname,
	.poll		= pn_socket_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= pn_socket_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);
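/*
 * seq_file iterators for the bound-socket list (assumption: exposed under
 * /proc/net by the Phonet initialisation code elsewhere in this module).
 */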
#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct hlist_node *node;
	struct sock *sknode;
	unsigned h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each_rcu(sknode, node, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_sock_get_idx(seq, 0);
	else
		sk = pn_sock_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "pt  loc  rem rs st tx_queue rx_queue "
			"  uid inode ref pointer drops", &len);
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %pK %d%n",
			sk->sk_protocol, pn->sobject, pn->dobject,
			pn->resource, sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			sock_i_uid(sk), sock_i_ino(sk),
			atomic_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops), &len);
	}
	seq_printf(seq, "%*s\n", 127 - len, "");
	return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_sock_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_sock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

static struct {
	struct sock *sk[256];
} pnres;
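/*
 * At most one socket may be registered per Phonet resource number (0..255),
 * and only in the initial network namespace.  Binding a resource requires
 * CAP_SYS_ADMIN and is driven from the per-protocol ioctls (assumption:
 * SIOCPNADDRESOURCE/SIOCPNDELRESOURCE).
 */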
/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return NULL;

	rcu_read_lock();
	sk = rcu_dereference(pnres.sk[res]);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	return sk;
}

static DEFINE_MUTEX(resource_mutex);

int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		sock_hold(sk);
		RCU_INIT_POINTER(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}

int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		RCU_INIT_POINTER(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}

void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			RCU_INIT_POINTER(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	while (match > 0) {
		__sock_put(sk);
		match--;
	}
	/* Caller is responsible for RCU sync before final sock_put() */
}
#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	if (!net_eq(net, &init_net))
		return NULL;

	for (i = 0; i < 256; i++) {
		if (pnres.sk[i] == NULL)
			continue;
		if (!pos)
			return pnres.sk + i;
		pos--;
	}
	return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
	struct net *net = seq_file_net(seq);
	unsigned i;

	BUG_ON(!net_eq(net, &init_net));

	for (i = (sk - pnres.sk) + 1; i < 256; i++)
		if (pnres.sk[i])
			return pnres.sk + i;
	return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock **sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_res_get_idx(seq, 0);
	else
		sk = pn_res_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	int len;

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%s%n", "rs   uid inode", &len);
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		seq_printf(seq, "%02X %5d %lu%n",
			   (int) (psk - pnres.sk), sock_i_uid(sk),
			   sock_i_ino(sk), &len);
	}
	seq_printf(seq, "%*s\n", 63 - len, "");
	return 0;
}

static const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_res_seq_ops,
				sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_res_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif