/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
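
/* Worked example of the block accounting above (illustrative only,
 * assuming FLOWCTL_BLK_SZ is 1024 as defined in msg.h): a 5000 byte
 * message costs tsk_blocks(5000) = 5000/1024 + 1 = 5 blocks, while a
 * 64 KB receive buffer advertises tsk_adv_blocks(65536) =
 * 65536/1024/4 = 16 blocks; the divide-by-4 absorbs the worst-case
 * truesize(len)/len overhead noted above. Against a peer without
 * TIPC_BLOCK_FLOWCTL, tsk_inc() counts 1 per message instead.
 */
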
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
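
/* Typical use of the macro above (sketch): callers hold the socket lock
 * and sleep until the condition becomes true, the timeout expires, or a
 * signal/socket error is reported, e.g.
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *
 * The macro drops the socket lock while sleeping and re-evaluates the
 * condition with the lock re-taken, so the condition expression must be
 * safe to run repeatedly.
 */
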
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
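
/* From user space this maps onto the usual socket() call (illustrative,
 * not compiled here):
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 * SOCK_STREAM and SOCK_SEQPACKET select stream_ops/packet_ops above;
 * SOCK_RDM and SOCK_DGRAM select msg_ops, with both marked
 * "unreturnable" and SOCK_DGRAM additionally "unreliable" (source
 * droppable).
 */
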
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Remove any pending SYN message */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
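
/* Teardown sequence (sketch): close() on a connected SEQPACKET/STREAM
 * socket ends up in tipc_release() above, which rejects unread messages
 * with TIPC_ERR_NO_PORT, lets __tipc_shutdown() send a FIN-style
 * TIPC_CONN_MSG carrying the error code to the peer, and finally frees
 * the socket after an RCU grace period through tipc_sk_callback().
 */
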
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns the size of the returned address structure on success,
 * errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
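
/* Binding a name range from user space looks like this (illustrative
 * only; the service type 18888 and instance range are arbitrary
 * example values):
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 * A negative scope (e.g. -TIPC_CLUSTER_SCOPE) withdraws the same
 * binding, as handled at the end of tipc_bind() above.
 */
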
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
	case TIPC_CONNECTING:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
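
/* A caller reaches tipc_sendmcast() via sendto() with a TIPC_ADDR_MCAST
 * destination (illustrative; type/instance values are arbitrary):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * One copy is delivered to every socket bound to an overlapping
 * instance range of the same type, cluster-wide.
 */
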
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: sending socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
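
/* Group communication is enabled by joining a group first; the four
 * send variants above are then selected by the destination address type
 * in tipc_sendmsg(). Illustrative join (type/instance values are
 * arbitrary examples):
 *
 *	struct tipc_group_req req = {
 *		.type = 18888,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 * After this, send() with no address broadcasts to the whole group, a
 * TIPC_ADDR_NAME address anycasts, TIPC_ADDR_ID unicasts, and
 * TIPC_ADDR_MCAST multicasts to the members matching the instance.
 */
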
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

/**
 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
		msg_set_syn(hdr, 1);
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
		return -ENOMEM;

	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
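
/* Addressing a service by name rather than port ID is the common case
 * for the TIPC_NAMED_MSG path above (illustrative values):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *		.addr.name.domain = 0,
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * A domain of 0 requests cluster-wide lookup. tipc_nametbl_translate()
 * resolves <type, instance> to a concrete <node, port>; when no binding
 * exists the send fails with -EHOSTUNREACH.
 */
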
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
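
/* Note that __tipc_sendstream() may legitimately return less than the
 * requested length: it sends in chunks of at most TIPC_MAX_USER_MSG_SIZE
 * and reports partial success once anything has gone out. A user space
 * writer therefore needs the usual retry loop (illustrative):
 *
 *	size_t off = 0;
 *	while (off < len) {
 *		ssize_t n = send(sd, buf + off, len - off, 0);
 *		if (n < 0)
 *			break;
 *		off += n;
 *	}
 */
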
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *msg;
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;
	msg = buf_msg(skb);

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			if (skb_linearize(skb))
				return -ENOMEM;
			msg = buf_msg(skb);
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
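
/* A receiver that asks for ancillary data gets rejection info through
 * the cmsg interface filled in by tipc_sk_anc_data_recv() above
 * (illustrative user space sketch; buffer sizes are arbitrary):
 *
 *	char cbuf[CMSG_SPACE(8) + CMSG_SPACE(1024)];
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(sd, &mh, 0);
 *
 * TIPC_ERRINFO then carries the error code and the size of any returned
 * data, TIPC_RETDATA the rejected payload itself, and TIPC_DESTNAME the
 * name the message was originally sent to. MSG_TRUNC in msg_flags
 * signals that the payload did not fit in the supplied buffer.
 */
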
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, skb, tsk);
			if (rc)
				break;
			hdr = buf_msg(skb);
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
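
/* The loop above honours SO_RCVLOWAT: with MSG_WAITALL, sock_rcvlowat()
 * makes 'required' equal to buflen, so the call only returns early on
 * timeout, signal, error or FIN. A reader wanting exactly N bytes can
 * thus do (illustrative):
 *
 *	recv(sd, buf, N, MSG_WAITALL);
 *
 * Without MSG_WAITALL the call returns as soon as at least the
 * low-water mark has been copied.
 */
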
copied : rc; 1929 } 1930 1931 /** 1932 * tipc_write_space - wake up thread if port congestion is released 1933 * @sk: socket 1934 */ 1935 static void tipc_write_space(struct sock *sk) 1936 { 1937 struct socket_wq *wq; 1938 1939 rcu_read_lock(); 1940 wq = rcu_dereference(sk->sk_wq); 1941 if (skwq_has_sleeper(wq)) 1942 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1943 EPOLLWRNORM | EPOLLWRBAND); 1944 rcu_read_unlock(); 1945 } 1946 1947 /** 1948 * tipc_data_ready - wake up threads to indicate messages have been received 1949 * @sk: socket 1950 */ 1951 1952 static void tipc_data_ready(struct sock *sk) 1953 { 1954 struct socket_wq *wq; 1955 1956 rcu_read_lock(); 1957 wq = rcu_dereference(sk->sk_wq); 1958 if (skwq_has_sleeper(wq)) 1959 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1960 EPOLLRDNORM | EPOLLRDBAND); 1961 rcu_read_unlock(); 1962 } 1963 1964 static void tipc_sock_destruct(struct sock *sk) 1965 { 1966 __skb_queue_purge(&sk->sk_receive_queue); 1967 } 1968 1969 static void tipc_sk_proto_rcv(struct sock *sk, 1970 struct sk_buff_head *inputq, 1971 struct sk_buff_head *xmitq) 1972 { 1973 struct sk_buff *skb = __skb_dequeue(inputq); 1974 struct tipc_sock *tsk = tipc_sk(sk); 1975 struct tipc_msg *hdr = buf_msg(skb); 1976 struct tipc_group *grp = tsk->group; 1977 bool wakeup = false; 1978 1979 switch (msg_user(hdr)) { 1980 case CONN_MANAGER: 1981 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq); 1982 return; 1983 case SOCK_WAKEUP: 1984 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1985 tsk->cong_link_cnt--; 1986 wakeup = true; 1987 break; 1988 case GROUP_PROTOCOL: 1989 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1990 break; 1991 case TOP_SRV: 1992 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1993 hdr, inputq, xmitq); 1994 break; 1995 default: 1996 break; 1997 } 1998 1999 if (wakeup) 2000 sk->sk_write_space(sk); 2001 2002 kfree_skb(skb); 2003 } 2004 2005 /** 2006 * tipc_sk_filter_connect - check incoming message for a connection-based socket 2007 * @tsk: TIPC socket 2008 * @skb: pointer to message buffer.
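 *
 * Editor's summary of the state handling below: TIPC_CONNECTING accepts
 * the setup ACK, or schedules a delayed SYN retransmit via sk_timer when
 * the SYN was rejected with TIPC_ERR_OVERLOAD; TIPC_LISTEN accepts only
 * SYNs; TIPC_ESTABLISHED accepts only traffic from the connected peer;
 * TIPC_OPEN and TIPC_DISCONNECTING drop everything. Any other outcome
 * aborts the setup attempt with ECONNREFUSED.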
2009 * Returns true if message should be added to receive queue, false otherwise 2010 */ 2011 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 2012 { 2013 struct sock *sk = &tsk->sk; 2014 struct net *net = sock_net(sk); 2015 struct tipc_msg *hdr = buf_msg(skb); 2016 bool con_msg = msg_connected(hdr); 2017 u32 pport = tsk_peer_port(tsk); 2018 u32 pnode = tsk_peer_node(tsk); 2019 u32 oport = msg_origport(hdr); 2020 u32 onode = msg_orignode(hdr); 2021 int err = msg_errcode(hdr); 2022 unsigned long delay; 2023 2024 if (unlikely(msg_mcast(hdr))) 2025 return false; 2026 2027 switch (sk->sk_state) { 2028 case TIPC_CONNECTING: 2029 /* Setup ACK */ 2030 if (likely(con_msg)) { 2031 if (err) 2032 break; 2033 tipc_sk_finish_conn(tsk, oport, onode); 2034 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2035 /* ACK+ message with data is added to receive queue */ 2036 if (msg_data_sz(hdr)) 2037 return true; 2038 /* Empty ACK-, - wake up sleeping connect() and drop */ 2039 sk->sk_data_ready(sk); 2040 msg_set_dest_droppable(hdr, 1); 2041 return false; 2042 } 2043 /* Ignore connectionless message if not from listening socket */ 2044 if (oport != pport || onode != pnode) 2045 return false; 2046 2047 /* Rejected SYN */ 2048 if (err != TIPC_ERR_OVERLOAD) 2049 break; 2050 2051 /* Prepare for new setup attempt if we have a SYN clone */ 2052 if (skb_queue_empty(&sk->sk_write_queue)) 2053 break; 2054 get_random_bytes(&delay, 2); 2055 delay %= (tsk->conn_timeout / 4); 2056 delay = msecs_to_jiffies(delay + 100); 2057 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay); 2058 return false; 2059 case TIPC_OPEN: 2060 case TIPC_DISCONNECTING: 2061 return false; 2062 case TIPC_LISTEN: 2063 /* Accept only SYN message */ 2064 if (!msg_is_syn(hdr) && 2065 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT) 2066 return false; 2067 if (!con_msg && !err) 2068 return true; 2069 return false; 2070 case TIPC_ESTABLISHED: 2071 /* Accept only connection-based messages sent by peer */ 2072 if (likely(con_msg && !err && pport == oport && pnode == onode)) 2073 return true; 2074 if (!tsk_peer_msg(tsk, hdr)) 2075 return false; 2076 if (!err) 2077 return true; 2078 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2079 tipc_node_remove_conn(net, pnode, tsk->portid); 2080 sk->sk_state_change(sk); 2081 return true; 2082 default: 2083 pr_err("Unknown sk_state %u\n", sk->sk_state); 2084 } 2085 /* Abort connection setup attempt */ 2086 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2087 sk->sk_err = ECONNREFUSED; 2088 sk->sk_state_change(sk); 2089 return true; 2090 } 2091 2092 /** 2093 * rcvbuf_limit - get proper overload limit of socket receive queue 2094 * @sk: socket 2095 * @skb: message 2096 * 2097 * For connection oriented messages, irrespective of importance, 2098 * default queue limit is 2 MB. 
2099 * 2100 * For connectionless messages, queue limits are based on message 2101 * importance as follows: 2102 * 2103 * TIPC_LOW_IMPORTANCE (2 MB) 2104 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2105 * TIPC_HIGH_IMPORTANCE (8 MB) 2106 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2107 * 2108 * Returns overload limit according to corresponding message importance 2109 */ 2110 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2111 { 2112 struct tipc_sock *tsk = tipc_sk(sk); 2113 struct tipc_msg *hdr = buf_msg(skb); 2114 2115 if (unlikely(msg_in_group(hdr))) 2116 return sk->sk_rcvbuf; 2117 2118 if (unlikely(!msg_connected(hdr))) 2119 return sk->sk_rcvbuf << msg_importance(hdr); 2120 2121 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2122 return sk->sk_rcvbuf; 2123 2124 return FLOWCTL_MSG_LIM; 2125 } 2126 2127 /** 2128 * tipc_sk_filter_rcv - validate incoming message 2129 * @sk: socket 2130 * @skb: pointer to message. 2131 * 2132 * Enqueues message on receive queue if acceptable; optionally handles 2133 * disconnect indication for a connected socket. 2134 * 2135 * Called with socket lock already taken 2136 * 2137 */ 2138 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2139 struct sk_buff_head *xmitq) 2140 { 2141 bool sk_conn = !tipc_sk_type_connectionless(sk); 2142 struct tipc_sock *tsk = tipc_sk(sk); 2143 struct tipc_group *grp = tsk->group; 2144 struct tipc_msg *hdr = buf_msg(skb); 2145 struct net *net = sock_net(sk); 2146 struct sk_buff_head inputq; 2147 int limit, err = TIPC_OK; 2148 2149 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " "); 2150 TIPC_SKB_CB(skb)->bytes_read = 0; 2151 __skb_queue_head_init(&inputq); 2152 __skb_queue_tail(&inputq, skb); 2153 2154 if (unlikely(!msg_isdata(hdr))) 2155 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2156 2157 if (unlikely(grp)) 2158 tipc_group_filter_msg(grp, &inputq, xmitq); 2159 2160 /* Validate and add to receive buffer if there is space */ 2161 while ((skb = __skb_dequeue(&inputq))) { 2162 hdr = buf_msg(skb); 2163 limit = rcvbuf_limit(sk, skb); 2164 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2165 (!sk_conn && msg_connected(hdr)) || 2166 (!grp && msg_in_group(hdr))) 2167 err = TIPC_ERR_NO_PORT; 2168 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2169 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, 2170 "err_overload2!"); 2171 atomic_inc(&sk->sk_drops); 2172 err = TIPC_ERR_OVERLOAD; 2173 } 2174 2175 if (unlikely(err)) { 2176 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) { 2177 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, 2178 "@filter_rcv!"); 2179 __skb_queue_tail(xmitq, skb); 2180 } 2181 err = TIPC_OK; 2182 continue; 2183 } 2184 __skb_queue_tail(&sk->sk_receive_queue, skb); 2185 skb_set_owner_r(skb, sk); 2186 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL, 2187 "rcvq >90% allocated!"); 2188 sk->sk_data_ready(sk); 2189 } 2190 } 2191 2192 /** 2193 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2194 * @sk: socket 2195 * @skb: message 2196 * 2197 * Caller must hold socket lock 2198 */ 2199 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2200 { 2201 unsigned int before = sk_rmem_alloc_get(sk); 2202 struct sk_buff_head xmitq; 2203 unsigned int added; 2204 2205 __skb_queue_head_init(&xmitq); 2206 2207 tipc_sk_filter_rcv(sk, skb, &xmitq); 2208 added = sk_rmem_alloc_get(sk) - before; 2209 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2210 2211 /* Send pending response/rejected messages, if any */ 2212 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2213 return 0; 2214 } 
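/*
 * Worked example for rcvbuf_limit() above (editor's sketch, assuming the
 * usual importance values 0..3 and a default sk_rcvbuf of 2 MB):
 *
 *	sk_rcvbuf << TIPC_LOW_IMPORTANCE	(0) =  2 MB
 *	sk_rcvbuf << TIPC_MEDIUM_IMPORTANCE	(1) =  4 MB
 *	sk_rcvbuf << TIPC_HIGH_IMPORTANCE	(2) =  8 MB
 *	sk_rcvbuf << TIPC_CRITICAL_IMPORTANCE	(3) = 16 MB
 *
 * Group messages and connections from peers advertising
 * TIPC_BLOCK_FLOWCTL use sk_rcvbuf unshifted, while connections from
 * older peers fall back to the fixed FLOWCTL_MSG_LIM budget.
 * tipc_sk_enqueue() below widens whichever limit applies by dupl_rcvcnt
 * when a message has to take the backlog path.
 */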
2215 2216 /** 2217 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2218 * inputq and try adding them to socket or backlog queue 2219 * @inputq: list of incoming buffers with potentially different destinations 2220 * @sk: socket where the buffers should be enqueued 2221 * @dport: port number for the socket 2222 * 2223 * Caller must hold socket lock 2224 */ 2225 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2226 u32 dport, struct sk_buff_head *xmitq) 2227 { 2228 unsigned long time_limit = jiffies + 2; 2229 struct sk_buff *skb; 2230 unsigned int lim; 2231 atomic_t *dcnt; 2232 u32 onode; 2233 2234 while (skb_queue_len(inputq)) { 2235 if (unlikely(time_after_eq(jiffies, time_limit))) 2236 return; 2237 2238 skb = tipc_skb_dequeue(inputq, dport); 2239 if (unlikely(!skb)) 2240 return; 2241 2242 /* Add message directly to receive queue if possible */ 2243 if (!sock_owned_by_user(sk)) { 2244 tipc_sk_filter_rcv(sk, skb, xmitq); 2245 continue; 2246 } 2247 2248 /* Try backlog, compensating for double-counted bytes */ 2249 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2250 if (!sk->sk_backlog.len) 2251 atomic_set(dcnt, 0); 2252 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2253 if (likely(!sk_add_backlog(sk, skb, lim))) { 2254 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL, 2255 "bklg & rcvq >90% allocated!"); 2256 continue; 2257 } 2258 2259 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!"); 2260 /* Overload => reject message back to sender */ 2261 onode = tipc_own_addr(sock_net(sk)); 2262 atomic_inc(&sk->sk_drops); 2263 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) { 2264 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL, 2265 "@sk_enqueue!"); 2266 __skb_queue_tail(xmitq, skb); 2267 } 2268 break; 2269 } 2270 } 2271 2272 /** 2273 * tipc_sk_rcv - handle a chain of incoming buffers 2274 * @inputq: buffer list containing the buffers 2275 * Consumes all buffers in list until inputq is empty 2276 * Note: may be called in multiple threads referring to the same queue 2277 */ 2278 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2279 { 2280 struct sk_buff_head xmitq; 2281 u32 dnode, dport = 0; 2282 int err; 2283 struct tipc_sock *tsk; 2284 struct sock *sk; 2285 struct sk_buff *skb; 2286 2287 __skb_queue_head_init(&xmitq); 2288 while (skb_queue_len(inputq)) { 2289 dport = tipc_skb_peek_port(inputq, dport); 2290 tsk = tipc_sk_lookup(net, dport); 2291 2292 if (likely(tsk)) { 2293 sk = &tsk->sk; 2294 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2295 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2296 spin_unlock_bh(&sk->sk_lock.slock); 2297 } 2298 /* Send pending response/rejected messages, if any */ 2299 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2300 sock_put(sk); 2301 continue; 2302 } 2303 /* No destination socket => dequeue skb if still there */ 2304 skb = tipc_skb_dequeue(inputq, dport); 2305 if (!skb) 2306 return; 2307 2308 /* Try secondary lookup if unresolved named message */ 2309 err = TIPC_ERR_NO_PORT; 2310 if (tipc_msg_lookup_dest(net, skb, &err)) 2311 goto xmit; 2312 2313 /* Prepare for message rejection */ 2314 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2315 continue; 2316 2317 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!"); 2318 xmit: 2319 dnode = msg_destnode(buf_msg(skb)); 2320 tipc_node_xmit_skb(net, skb, dnode, dport); 2321 } 2322 } 2323 2324 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2325 { 2326 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2327 struct sock *sk = sock->sk; 
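	/* Editor's note: the loop below is the standard socket wait
	 * pattern. sk_wait_event() releases the socket lock, sleeps until
	 * sk_state leaves TIPC_CONNECTING, the timeout drains or a signal
	 * arrives, then re-acquires the lock; the checks at the top of
	 * each iteration turn those exits into -ETIMEDOUT or
	 * sock_intr_errno() respectively.
	 */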
2328 int done; 2329 2330 do { 2331 int err = sock_error(sk); 2332 if (err) 2333 return err; 2334 if (!*timeo_p) 2335 return -ETIMEDOUT; 2336 if (signal_pending(current)) 2337 return sock_intr_errno(*timeo_p); 2338 2339 add_wait_queue(sk_sleep(sk), &wait); 2340 done = sk_wait_event(sk, timeo_p, 2341 sk->sk_state != TIPC_CONNECTING, &wait); 2342 remove_wait_queue(sk_sleep(sk), &wait); 2343 } while (!done); 2344 return 0; 2345 } 2346 2347 /** 2348 * tipc_connect - establish a connection to another TIPC port 2349 * @sock: socket structure 2350 * @dest: socket address for destination port 2351 * @destlen: size of socket address data structure 2352 * @flags: file-related flags associated with socket 2353 * 2354 * Returns 0 on success, errno otherwise 2355 */ 2356 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2357 int destlen, int flags) 2358 { 2359 struct sock *sk = sock->sk; 2360 struct tipc_sock *tsk = tipc_sk(sk); 2361 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2362 struct msghdr m = {NULL,}; 2363 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; 2364 int previous; 2365 int res = 0; 2366 2367 if (destlen != sizeof(struct sockaddr_tipc)) 2368 return -EINVAL; 2369 2370 lock_sock(sk); 2371 2372 if (tsk->group) { 2373 res = -EINVAL; 2374 goto exit; 2375 } 2376 2377 if (dst->family == AF_UNSPEC) { 2378 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2379 if (!tipc_sk_type_connectionless(sk)) 2380 res = -EINVAL; 2381 goto exit; 2382 } else if (dst->family != AF_TIPC) { 2383 res = -EINVAL; 2384 } 2385 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2386 res = -EINVAL; 2387 if (res) 2388 goto exit; 2389 2390 /* DGRAM/RDM connect(), just save the destaddr */ 2391 if (tipc_sk_type_connectionless(sk)) { 2392 memcpy(&tsk->peer, dest, destlen); 2393 goto exit; 2394 } 2395 2396 previous = sk->sk_state; 2397 2398 switch (sk->sk_state) { 2399 case TIPC_OPEN: 2400 /* Send a 'SYN-' to destination */ 2401 m.msg_name = dest; 2402 m.msg_namelen = destlen; 2403 2404 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2405 * indicate send_msg() is never blocked. 2406 */ 2407 if (!timeout) 2408 m.msg_flags = MSG_DONTWAIT; 2409 2410 res = __tipc_sendmsg(sock, &m, 0); 2411 if ((res < 0) && (res != -EWOULDBLOCK)) 2412 goto exit; 2413 2414 /* Just entered TIPC_CONNECTING state; the only 2415 * difference is that return value in non-blocking 2416 * case is EINPROGRESS, rather than EALREADY. 
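			 * A non-blocking caller is therefore expected to
			 * wait for EPOLLOUT and call connect() again; such
			 * a repeated call takes the TIPC_CONNECTING branch
			 * below and keeps returning -EALREADY until the
			 * handshake resolves.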
2417 */ 2418 res = -EINPROGRESS; 2419 /* fall thru' */ 2420 case TIPC_CONNECTING: 2421 if (!timeout) { 2422 if (previous == TIPC_CONNECTING) 2423 res = -EALREADY; 2424 goto exit; 2425 } 2426 timeout = msecs_to_jiffies(timeout); 2427 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2428 res = tipc_wait_for_connect(sock, &timeout); 2429 break; 2430 case TIPC_ESTABLISHED: 2431 res = -EISCONN; 2432 break; 2433 default: 2434 res = -EINVAL; 2435 } 2436 2437 exit: 2438 release_sock(sk); 2439 return res; 2440 } 2441 2442 /** 2443 * tipc_listen - allow socket to listen for incoming connections 2444 * @sock: socket structure 2445 * @len: (unused) 2446 * 2447 * Returns 0 on success, errno otherwise 2448 */ 2449 static int tipc_listen(struct socket *sock, int len) 2450 { 2451 struct sock *sk = sock->sk; 2452 int res; 2453 2454 lock_sock(sk); 2455 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2456 release_sock(sk); 2457 2458 return res; 2459 } 2460 2461 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2462 { 2463 struct sock *sk = sock->sk; 2464 DEFINE_WAIT(wait); 2465 int err; 2466 2467 /* True wake-one mechanism for incoming connections: only 2468 * one process gets woken up, not the 'whole herd'. 2469 * Since we do not 'race & poll' for established sockets 2470 * anymore, the common case will execute the loop only once. 2471 */ 2472 for (;;) { 2473 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2474 TASK_INTERRUPTIBLE); 2475 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2476 release_sock(sk); 2477 timeo = schedule_timeout(timeo); 2478 lock_sock(sk); 2479 } 2480 err = 0; 2481 if (!skb_queue_empty(&sk->sk_receive_queue)) 2482 break; 2483 err = -EAGAIN; 2484 if (!timeo) 2485 break; 2486 err = sock_intr_errno(timeo); 2487 if (signal_pending(current)) 2488 break; 2489 } 2490 finish_wait(sk_sleep(sk), &wait); 2491 return err; 2492 } 2493 2494 /** 2495 * tipc_accept - wait for connection request 2496 * @sock: listening socket 2497 * @new_sock: new socket that is to be connected 2498 * @flags: file-related flags associated with socket * @kern: caused by kernel or by userspace? 2499 * 2500 * Returns 0 on success, errno otherwise 2501 */ 2502 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2503 bool kern) 2504 { 2505 struct sock *new_sk, *sk = sock->sk; 2506 struct sk_buff *buf; 2507 struct tipc_sock *new_tsock; 2508 struct tipc_msg *msg; 2509 long timeo; 2510 int res; 2511 2512 lock_sock(sk); 2513 2514 if (sk->sk_state != TIPC_LISTEN) { 2515 res = -EINVAL; 2516 goto exit; 2517 } 2518 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2519 res = tipc_wait_for_accept(sock, timeo); 2520 if (res) 2521 goto exit; 2522 2523 buf = skb_peek(&sk->sk_receive_queue); 2524 2525 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2526 if (res) 2527 goto exit; 2528 security_sk_clone(sock->sk, new_sock->sk); 2529 2530 new_sk = new_sock->sk; 2531 new_tsock = tipc_sk(new_sk); 2532 msg = buf_msg(buf); 2533 2534 /* we lock on new_sk; but lockdep sees the lock on sk */ 2535 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2536 2537 /* 2538 * Reject any stray messages received by new socket 2539 * before the socket lock was taken (very, very unlikely) 2540 */ 2541 tsk_rej_rx_queue(new_sk); 2542 2543 /* Connect new socket to its peer */ 2544 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2545 2546 tsk_set_importance(new_tsock, msg_importance(msg)); 2547 if (msg_named(msg)) { 2548 new_tsock->conn_type = msg_nametype(msg); 2549 new_tsock->conn_instance = msg_nameinst(msg); 2550 }
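	/*
	 * Editor's note: when the connection was set up towards a bound
	 * name, the SYN carries that name, and it is preserved in
	 * conn_type/conn_instance above so the netlink diag code further
	 * down (__tipc_nl_add_sk_con()) can report which name the
	 * connection was established against.
	 */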
2551 2552 /* 2553 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 2554 * Respond to 'SYN+' by queuing it on new socket. 2555 */ 2556 if (!msg_data_sz(msg)) { 2557 struct msghdr m = {NULL,}; 2558 2559 tsk_advance_rx_queue(sk); 2560 __tipc_sendstream(new_sock, &m, 0); 2561 } else { 2562 __skb_dequeue(&sk->sk_receive_queue); 2563 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2564 skb_set_owner_r(buf, new_sk); 2565 } 2566 release_sock(new_sk); 2567 exit: 2568 release_sock(sk); 2569 return res; 2570 } 2571 2572 /** 2573 * tipc_shutdown - shutdown socket connection 2574 * @sock: socket structure 2575 * @how: direction to close (must be SHUT_RDWR) 2576 * 2577 * Terminates connection (if necessary), then purges socket's receive queue. 2578 * 2579 * Returns 0 on success, errno otherwise 2580 */ 2581 static int tipc_shutdown(struct socket *sock, int how) 2582 { 2583 struct sock *sk = sock->sk; 2584 int res; 2585 2586 if (how != SHUT_RDWR) 2587 return -EINVAL; 2588 2589 lock_sock(sk); 2590 2591 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); 2592 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2593 sk->sk_shutdown = SEND_SHUTDOWN; 2594 2595 if (sk->sk_state == TIPC_DISCONNECTING) { 2596 /* Discard any unreceived messages */ 2597 __skb_queue_purge(&sk->sk_receive_queue); 2598 2599 /* Wake up anyone sleeping in poll */ 2600 sk->sk_state_change(sk); 2601 res = 0; 2602 } else { 2603 res = -ENOTCONN; 2604 } 2605 2606 release_sock(sk); 2607 return res; 2608 } 2609 2610 static void tipc_sk_check_probing_state(struct sock *sk, 2611 struct sk_buff_head *list) 2612 { 2613 struct tipc_sock *tsk = tipc_sk(sk); 2614 u32 pnode = tsk_peer_node(tsk); 2615 u32 pport = tsk_peer_port(tsk); 2616 u32 self = tsk_own_node(tsk); 2617 u32 oport = tsk->portid; 2618 struct sk_buff *skb; 2619 2620 if (tsk->probe_unacked) { 2621 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2622 sk->sk_err = ECONNABORTED; 2623 tipc_node_remove_conn(sock_net(sk), pnode, pport); 2624 sk->sk_state_change(sk); 2625 return; 2626 } 2627 /* Prepare new probe */ 2628 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2629 pnode, self, pport, oport, TIPC_OK); 2630 if (skb) 2631 __skb_queue_tail(list, skb); 2632 tsk->probe_unacked = true; 2633 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2634 } 2635 2636 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) 2637 { 2638 struct tipc_sock *tsk = tipc_sk(sk); 2639 2640 /* Try again later if dest link is congested */ 2641 if (tsk->cong_link_cnt) { 2642 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100)); 2643 return; 2644 } 2645 /* Prepare SYN for retransmit */ 2646 tipc_msg_skb_clone(&sk->sk_write_queue, list); 2647 } 2648 2649 static void tipc_sk_timeout(struct timer_list *t) 2650 { 2651 struct sock *sk = from_timer(sk, t, sk_timer); 2652 struct tipc_sock *tsk = tipc_sk(sk); 2653 u32 pnode = tsk_peer_node(tsk); 2654 struct sk_buff_head list; 2655 int rc = 0; 2656 2657 skb_queue_head_init(&list); 2658 bh_lock_sock(sk); 2659 2660 /* Try again later if socket is busy */ 2661 if (sock_owned_by_user(sk)) { 2662 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2663 bh_unlock_sock(sk); 2664 return; 2665 } 2666 2667 if (sk->sk_state == TIPC_ESTABLISHED) 2668 tipc_sk_check_probing_state(sk, &list); 2669 else if (sk->sk_state == TIPC_CONNECTING) 2670 tipc_sk_retry_connect(sk, &list); 2671 2672 bh_unlock_sock(sk); 2673 2674 if (!skb_queue_empty(&list)) 2675 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid); 2676 2677 /* SYN messages 
may cause link congestion */ 2678 if (rc == -ELINKCONG) { 2679 tipc_dest_push(&tsk->cong_links, pnode, 0); 2680 tsk->cong_link_cnt = 1; 2681 } 2682 sock_put(sk); 2683 } 2684 2685 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2686 struct tipc_name_seq const *seq) 2687 { 2688 struct sock *sk = &tsk->sk; 2689 struct net *net = sock_net(sk); 2690 struct publication *publ; 2691 u32 key; 2692 2693 if (scope != TIPC_NODE_SCOPE) 2694 scope = TIPC_CLUSTER_SCOPE; 2695 2696 if (tipc_sk_connected(sk)) 2697 return -EINVAL; 2698 key = tsk->portid + tsk->pub_count + 1; 2699 if (key == tsk->portid) 2700 return -EADDRINUSE; 2701 2702 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2703 scope, tsk->portid, key); 2704 if (unlikely(!publ)) 2705 return -EINVAL; 2706 2707 list_add(&publ->binding_sock, &tsk->publications); 2708 tsk->pub_count++; 2709 tsk->published = 1; 2710 return 0; 2711 } 2712 2713 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2714 struct tipc_name_seq const *seq) 2715 { 2716 struct net *net = sock_net(&tsk->sk); 2717 struct publication *publ; 2718 struct publication *safe; 2719 int rc = -EINVAL; 2720 2721 if (scope != TIPC_NODE_SCOPE) 2722 scope = TIPC_CLUSTER_SCOPE; 2723 2724 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2725 if (seq) { 2726 if (publ->scope != scope) 2727 continue; 2728 if (publ->type != seq->type) 2729 continue; 2730 if (publ->lower != seq->lower) 2731 continue; 2732 if (publ->upper != seq->upper) 2733 break; 2734 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2735 publ->upper, publ->key); 2736 rc = 0; 2737 break; 2738 } 2739 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2740 publ->upper, publ->key); 2741 rc = 0; 2742 } 2743 if (list_empty(&tsk->publications)) 2744 tsk->published = 0; 2745 return rc; 2746 } 2747 2748 /* tipc_sk_reinit: set non-zero address in all existing sockets 2749 * when we go from standalone to network mode. 
2750 */ 2751 void tipc_sk_reinit(struct net *net) 2752 { 2753 struct tipc_net *tn = net_generic(net, tipc_net_id); 2754 struct rhashtable_iter iter; 2755 struct tipc_sock *tsk; 2756 struct tipc_msg *msg; 2757 2758 rhashtable_walk_enter(&tn->sk_rht, &iter); 2759 2760 do { 2761 rhashtable_walk_start(&iter); 2762 2763 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2764 sock_hold(&tsk->sk); 2765 rhashtable_walk_stop(&iter); 2766 lock_sock(&tsk->sk); 2767 msg = &tsk->phdr; 2768 msg_set_prevnode(msg, tipc_own_addr(net)); 2769 msg_set_orignode(msg, tipc_own_addr(net)); 2770 release_sock(&tsk->sk); 2771 rhashtable_walk_start(&iter); 2772 sock_put(&tsk->sk); 2773 } 2774 2775 rhashtable_walk_stop(&iter); 2776 } while (tsk == ERR_PTR(-EAGAIN)); 2777 2778 rhashtable_walk_exit(&iter); 2779 } 2780 2781 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2782 { 2783 struct tipc_net *tn = net_generic(net, tipc_net_id); 2784 struct tipc_sock *tsk; 2785 2786 rcu_read_lock(); 2787 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2788 if (tsk) 2789 sock_hold(&tsk->sk); 2790 rcu_read_unlock(); 2791 2792 return tsk; 2793 } 2794 2795 static int tipc_sk_insert(struct tipc_sock *tsk) 2796 { 2797 struct sock *sk = &tsk->sk; 2798 struct net *net = sock_net(sk); 2799 struct tipc_net *tn = net_generic(net, tipc_net_id); 2800 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2801 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2802 2803 while (remaining--) { 2804 portid++; 2805 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2806 portid = TIPC_MIN_PORT; 2807 tsk->portid = portid; 2808 sock_hold(&tsk->sk); 2809 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2810 tsk_rht_params)) 2811 return 0; 2812 sock_put(&tsk->sk); 2813 } 2814 2815 return -1; 2816 } 2817 2818 static void tipc_sk_remove(struct tipc_sock *tsk) 2819 { 2820 struct sock *sk = &tsk->sk; 2821 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2822 2823 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2824 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2825 __sock_put(sk); 2826 } 2827 } 2828 2829 static const struct rhashtable_params tsk_rht_params = { 2830 .nelem_hint = 192, 2831 .head_offset = offsetof(struct tipc_sock, node), 2832 .key_offset = offsetof(struct tipc_sock, portid), 2833 .key_len = sizeof(u32), /* portid */ 2834 .max_size = 1048576, 2835 .min_size = 256, 2836 .automatic_shrinking = true, 2837 }; 2838 2839 int tipc_sk_rht_init(struct net *net) 2840 { 2841 struct tipc_net *tn = net_generic(net, tipc_net_id); 2842 2843 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2844 } 2845 2846 void tipc_sk_rht_destroy(struct net *net) 2847 { 2848 struct tipc_net *tn = net_generic(net, tipc_net_id); 2849 2850 /* Wait for socket readers to complete */ 2851 synchronize_net(); 2852 2853 rhashtable_destroy(&tn->sk_rht); 2854 } 2855 2856 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2857 { 2858 struct net *net = sock_net(&tsk->sk); 2859 struct tipc_group *grp = tsk->group; 2860 struct tipc_msg *hdr = &tsk->phdr; 2861 struct tipc_name_seq seq; 2862 int rc; 2863 2864 if (mreq->type < TIPC_RESERVED_TYPES) 2865 return -EACCES; 2866 if (mreq->scope > TIPC_NODE_SCOPE) 2867 return -EINVAL; 2868 if (grp) 2869 return -EACCES; 2870 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2871 if (!grp) 2872 return -ENOMEM; 2873 tsk->group = grp; 2874 msg_set_lookup_scope(hdr, mreq->scope); 2875 
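	/* Editor's note: group membership is expressed through the name
	 * table - the publication issued below, with type = group id and
	 * instance = member id, is what makes this join visible to the
	 * other members (their sockets learn of it via TOP_SRV events,
	 * see tipc_sk_proto_rcv()).
	 */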
msg_set_nametype(hdr, mreq->type); 2876 msg_set_dest_droppable(hdr, true); 2877 seq.type = mreq->type; 2878 seq.lower = mreq->instance; 2879 seq.upper = seq.lower; 2880 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2881 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2882 if (rc) { 2883 tipc_group_delete(net, grp); 2884 tsk->group = NULL; 2885 return rc; 2886 } 2887 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2888 tsk->mc_method.rcast = true; 2889 tsk->mc_method.mandatory = true; 2890 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2891 return rc; 2892 } 2893 2894 static int tipc_sk_leave(struct tipc_sock *tsk) 2895 { 2896 struct net *net = sock_net(&tsk->sk); 2897 struct tipc_group *grp = tsk->group; 2898 struct tipc_name_seq seq; 2899 int scope; 2900 2901 if (!grp) 2902 return -EINVAL; 2903 tipc_group_self(grp, &seq, &scope); 2904 tipc_group_delete(net, grp); 2905 tsk->group = NULL; 2906 tipc_sk_withdraw(tsk, scope, &seq); 2907 return 0; 2908 } 2909 2910 /** 2911 * tipc_setsockopt - set socket option 2912 * @sock: socket structure 2913 * @lvl: option level 2914 * @opt: option identifier 2915 * @ov: pointer to new option value 2916 * @ol: length of option value 2917 * 2918 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2919 * (to ease compatibility). 2920 * 2921 * Returns 0 on success, errno otherwise 2922 */ 2923 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2924 char __user *ov, unsigned int ol) 2925 { 2926 struct sock *sk = sock->sk; 2927 struct tipc_sock *tsk = tipc_sk(sk); 2928 struct tipc_group_req mreq; 2929 u32 value = 0; 2930 int res = 0; 2931 2932 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2933 return 0; 2934 if (lvl != SOL_TIPC) 2935 return -ENOPROTOOPT; 2936 2937 switch (opt) { 2938 case TIPC_IMPORTANCE: 2939 case TIPC_SRC_DROPPABLE: 2940 case TIPC_DEST_DROPPABLE: 2941 case TIPC_CONN_TIMEOUT: 2942 if (ol < sizeof(value)) 2943 return -EINVAL; 2944 if (get_user(value, (u32 __user *)ov)) 2945 return -EFAULT; 2946 break; 2947 case TIPC_GROUP_JOIN: 2948 if (ol < sizeof(mreq)) 2949 return -EINVAL; 2950 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2951 return -EFAULT; 2952 break; 2953 default: 2954 if (ov || ol) 2955 return -EINVAL; 2956 } 2957 2958 lock_sock(sk); 2959 2960 switch (opt) { 2961 case TIPC_IMPORTANCE: 2962 res = tsk_set_importance(tsk, value); 2963 break; 2964 case TIPC_SRC_DROPPABLE: 2965 if (sock->type != SOCK_STREAM) 2966 tsk_set_unreliable(tsk, value); 2967 else 2968 res = -ENOPROTOOPT; 2969 break; 2970 case TIPC_DEST_DROPPABLE: 2971 tsk_set_unreturnable(tsk, value); 2972 break; 2973 case TIPC_CONN_TIMEOUT: 2974 tipc_sk(sk)->conn_timeout = value; 2975 break; 2976 case TIPC_MCAST_BROADCAST: 2977 tsk->mc_method.rcast = false; 2978 tsk->mc_method.mandatory = true; 2979 break; 2980 case TIPC_MCAST_REPLICAST: 2981 tsk->mc_method.rcast = true; 2982 tsk->mc_method.mandatory = true; 2983 break; 2984 case TIPC_GROUP_JOIN: 2985 res = tipc_sk_join(tsk, &mreq); 2986 break; 2987 case TIPC_GROUP_LEAVE: 2988 res = tipc_sk_leave(tsk); 2989 break; 2990 default: 2991 res = -EINVAL; 2992 } 2993 2994 release_sock(sk); 2995 2996 return res; 2997 } 2998 2999 /** 3000 * tipc_getsockopt - get socket option 3001 * @sock: socket structure 3002 * @lvl: option level 3003 * @opt: option identifier 3004 * @ov: receptacle for option value 3005 * @ol: receptacle for length of option value 3006 * 3007 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 3008 * (to ease compatibility). 
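 *
 * Editor's sketch of the user-space side of these two calls (the
 * descriptor 'sd' and all values are hypothetical; types and constants
 * from <linux/tipc.h>):
 *
 *	struct tipc_group_req req = { .type = 4711, .instance = 17,
 *				      .scope = TIPC_CLUSTER_SCOPE };
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 *	__u32 tmo;
 *	socklen_t len = sizeof(tmo);
 *	getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, &len);
 *
 * TIPC_CONN_TIMEOUT is read back in milliseconds, matching the raw
 * value stored by the setter above.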
3009 * 3010 * Returns 0 on success, errno otherwise 3011 */ 3012 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 3013 char __user *ov, int __user *ol) 3014 { 3015 struct sock *sk = sock->sk; 3016 struct tipc_sock *tsk = tipc_sk(sk); 3017 struct tipc_name_seq seq; 3018 int len, scope; 3019 u32 value; 3020 int res; 3021 3022 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 3023 return put_user(0, ol); 3024 if (lvl != SOL_TIPC) 3025 return -ENOPROTOOPT; 3026 res = get_user(len, ol); 3027 if (res) 3028 return res; 3029 3030 lock_sock(sk); 3031 3032 switch (opt) { 3033 case TIPC_IMPORTANCE: 3034 value = tsk_importance(tsk); 3035 break; 3036 case TIPC_SRC_DROPPABLE: 3037 value = tsk_unreliable(tsk); 3038 break; 3039 case TIPC_DEST_DROPPABLE: 3040 value = tsk_unreturnable(tsk); 3041 break; 3042 case TIPC_CONN_TIMEOUT: 3043 value = tsk->conn_timeout; 3044 /* no need to set "res", since already 0 at this point */ 3045 break; 3046 case TIPC_NODE_RECVQ_DEPTH: 3047 value = 0; /* was tipc_queue_size, now obsolete */ 3048 break; 3049 case TIPC_SOCK_RECVQ_DEPTH: 3050 value = skb_queue_len(&sk->sk_receive_queue); 3051 break; 3052 case TIPC_GROUP_JOIN: 3053 seq.type = 0; 3054 if (tsk->group) 3055 tipc_group_self(tsk->group, &seq, &scope); 3056 value = seq.type; 3057 break; 3058 default: 3059 res = -EINVAL; 3060 } 3061 3062 release_sock(sk); 3063 3064 if (res) 3065 return res; /* "get" failed */ 3066 3067 if (len < sizeof(value)) 3068 return -EINVAL; 3069 3070 if (copy_to_user(ov, &value, sizeof(value))) 3071 return -EFAULT; 3072 3073 return put_user(sizeof(value), ol); 3074 } 3075 3076 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3077 { 3078 struct net *net = sock_net(sock->sk); 3079 struct tipc_sioc_nodeid_req nr = {0}; 3080 struct tipc_sioc_ln_req lnr; 3081 void __user *argp = (void __user *)arg; 3082 3083 switch (cmd) { 3084 case SIOCGETLINKNAME: 3085 if (copy_from_user(&lnr, argp, sizeof(lnr))) 3086 return -EFAULT; 3087 if (!tipc_node_get_linkname(net, 3088 lnr.bearer_id & 0xffff, lnr.peer, 3089 lnr.linkname, TIPC_MAX_LINK_NAME)) { 3090 if (copy_to_user(argp, &lnr, sizeof(lnr))) 3091 return -EFAULT; 3092 return 0; 3093 } 3094 return -EADDRNOTAVAIL; 3095 case SIOCGETNODEID: 3096 if (copy_from_user(&nr, argp, sizeof(nr))) 3097 return -EFAULT; 3098 if (!tipc_node_get_id(net, nr.peer, nr.node_id)) 3099 return -EADDRNOTAVAIL; 3100 if (copy_to_user(argp, &nr, sizeof(nr))) 3101 return -EFAULT; 3102 return 0; 3103 default: 3104 return -ENOIOCTLCMD; 3105 } 3106 } 3107 3108 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 3109 { 3110 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3111 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3112 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3113 3114 tsk1->peer.family = AF_TIPC; 3115 tsk1->peer.addrtype = TIPC_ADDR_ID; 3116 tsk1->peer.scope = TIPC_NODE_SCOPE; 3117 tsk1->peer.addr.id.ref = tsk2->portid; 3118 tsk1->peer.addr.id.node = onode; 3119 tsk2->peer.family = AF_TIPC; 3120 tsk2->peer.addrtype = TIPC_ADDR_ID; 3121 tsk2->peer.scope = TIPC_NODE_SCOPE; 3122 tsk2->peer.addr.id.ref = tsk1->portid; 3123 tsk2->peer.addr.id.node = onode; 3124 3125 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3126 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3127 return 0; 3128 } 3129 3130 /* Protocol switches for the various types of TIPC sockets */ 3131 3132 static const struct proto_ops msg_ops = { 3133 .owner = THIS_MODULE, 3134 .family = AF_TIPC, 3135 .release = tipc_release, 3136 .bind = tipc_bind, 
3137 .connect = tipc_connect, 3138 .socketpair = tipc_socketpair, 3139 .accept = sock_no_accept, 3140 .getname = tipc_getname, 3141 .poll = tipc_poll, 3142 .ioctl = tipc_ioctl, 3143 .listen = sock_no_listen, 3144 .shutdown = tipc_shutdown, 3145 .setsockopt = tipc_setsockopt, 3146 .getsockopt = tipc_getsockopt, 3147 .sendmsg = tipc_sendmsg, 3148 .recvmsg = tipc_recvmsg, 3149 .mmap = sock_no_mmap, 3150 .sendpage = sock_no_sendpage 3151 }; 3152 3153 static const struct proto_ops packet_ops = { 3154 .owner = THIS_MODULE, 3155 .family = AF_TIPC, 3156 .release = tipc_release, 3157 .bind = tipc_bind, 3158 .connect = tipc_connect, 3159 .socketpair = tipc_socketpair, 3160 .accept = tipc_accept, 3161 .getname = tipc_getname, 3162 .poll = tipc_poll, 3163 .ioctl = tipc_ioctl, 3164 .listen = tipc_listen, 3165 .shutdown = tipc_shutdown, 3166 .setsockopt = tipc_setsockopt, 3167 .getsockopt = tipc_getsockopt, 3168 .sendmsg = tipc_send_packet, 3169 .recvmsg = tipc_recvmsg, 3170 .mmap = sock_no_mmap, 3171 .sendpage = sock_no_sendpage 3172 }; 3173 3174 static const struct proto_ops stream_ops = { 3175 .owner = THIS_MODULE, 3176 .family = AF_TIPC, 3177 .release = tipc_release, 3178 .bind = tipc_bind, 3179 .connect = tipc_connect, 3180 .socketpair = tipc_socketpair, 3181 .accept = tipc_accept, 3182 .getname = tipc_getname, 3183 .poll = tipc_poll, 3184 .ioctl = tipc_ioctl, 3185 .listen = tipc_listen, 3186 .shutdown = tipc_shutdown, 3187 .setsockopt = tipc_setsockopt, 3188 .getsockopt = tipc_getsockopt, 3189 .sendmsg = tipc_sendstream, 3190 .recvmsg = tipc_recvstream, 3191 .mmap = sock_no_mmap, 3192 .sendpage = sock_no_sendpage 3193 }; 3194 3195 static const struct net_proto_family tipc_family_ops = { 3196 .owner = THIS_MODULE, 3197 .family = AF_TIPC, 3198 .create = tipc_sk_create 3199 }; 3200 3201 static struct proto tipc_proto = { 3202 .name = "TIPC", 3203 .owner = THIS_MODULE, 3204 .obj_size = sizeof(struct tipc_sock), 3205 .sysctl_rmem = sysctl_tipc_rmem 3206 }; 3207 3208 /** 3209 * tipc_socket_init - initialize TIPC socket interface 3210 * 3211 * Returns 0 on success, errno otherwise 3212 */ 3213 int tipc_socket_init(void) 3214 { 3215 int res; 3216 3217 res = proto_register(&tipc_proto, 1); 3218 if (res) { 3219 pr_err("Failed to register TIPC protocol type\n"); 3220 goto out; 3221 } 3222 3223 res = sock_register(&tipc_family_ops); 3224 if (res) { 3225 pr_err("Failed to register TIPC socket type\n"); 3226 proto_unregister(&tipc_proto); 3227 goto out; 3228 } 3229 out: 3230 return res; 3231 } 3232 3233 /** 3234 * tipc_socket_stop - stop TIPC socket interface 3235 */ 3236 void tipc_socket_stop(void) 3237 { 3238 sock_unregister(tipc_family_ops.family); 3239 proto_unregister(&tipc_proto); 3240 } 3241 3242 /* Caller should hold socket lock for the passed tipc socket. 
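 * The connection nest emitted below has the following layout (editor's
 * summary):
 *
 *	TIPC_NLA_SOCK_CON
 *	    TIPC_NLA_CON_NODE	u32, peer node
 *	    TIPC_NLA_CON_SOCK	u32, peer port
 *	    TIPC_NLA_CON_FLAG	flag, present for name-based connections
 *	    TIPC_NLA_CON_TYPE	u32, connection type (ditto)
 *	    TIPC_NLA_CON_INST	u32, connection instance (ditto)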
*/ 3243 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3244 { 3245 u32 peer_node; 3246 u32 peer_port; 3247 struct nlattr *nest; 3248 3249 peer_node = tsk_peer_node(tsk); 3250 peer_port = tsk_peer_port(tsk); 3251 3252 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3253 3254 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3255 goto msg_full; 3256 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3257 goto msg_full; 3258 3259 if (tsk->conn_type != 0) { 3260 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3261 goto msg_full; 3262 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3263 goto msg_full; 3264 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3265 goto msg_full; 3266 } 3267 nla_nest_end(skb, nest); 3268 3269 return 0; 3270 3271 msg_full: 3272 nla_nest_cancel(skb, nest); 3273 3274 return -EMSGSIZE; 3275 } 3276 3277 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3278 *tsk) 3279 { 3280 struct net *net = sock_net(skb->sk); 3281 struct sock *sk = &tsk->sk; 3282 3283 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3284 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3285 return -EMSGSIZE; 3286 3287 if (tipc_sk_connected(sk)) { 3288 if (__tipc_nl_add_sk_con(skb, tsk)) 3289 return -EMSGSIZE; 3290 } else if (!list_empty(&tsk->publications)) { 3291 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3292 return -EMSGSIZE; 3293 } 3294 return 0; 3295 } 3296 3297 /* Caller should hold socket lock for the passed tipc socket. */ 3298 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3299 struct tipc_sock *tsk) 3300 { 3301 struct nlattr *attrs; 3302 void *hdr; 3303 3304 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3305 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3306 if (!hdr) 3307 goto msg_cancel; 3308 3309 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3310 if (!attrs) 3311 goto genlmsg_cancel; 3312 3313 if (__tipc_nl_add_sk_info(skb, tsk)) 3314 goto attr_msg_cancel; 3315 3316 nla_nest_end(skb, attrs); 3317 genlmsg_end(skb, hdr); 3318 3319 return 0; 3320 3321 attr_msg_cancel: 3322 nla_nest_cancel(skb, attrs); 3323 genlmsg_cancel: 3324 genlmsg_cancel(skb, hdr); 3325 msg_cancel: 3326 return -EMSGSIZE; 3327 } 3328 3329 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3330 int (*skb_handler)(struct sk_buff *skb, 3331 struct netlink_callback *cb, 3332 struct tipc_sock *tsk)) 3333 { 3334 struct rhashtable_iter *iter = (void *)cb->args[4]; 3335 struct tipc_sock *tsk; 3336 int err; 3337 3338 rhashtable_walk_start(iter); 3339 while ((tsk = rhashtable_walk_next(iter)) != NULL) { 3340 if (IS_ERR(tsk)) { 3341 err = PTR_ERR(tsk); 3342 if (err == -EAGAIN) { 3343 err = 0; 3344 continue; 3345 } 3346 break; 3347 } 3348 3349 sock_hold(&tsk->sk); 3350 rhashtable_walk_stop(iter); 3351 lock_sock(&tsk->sk); 3352 err = skb_handler(skb, cb, tsk); 3353 if (err) { 3354 release_sock(&tsk->sk); 3355 sock_put(&tsk->sk); 3356 goto out; 3357 } 3358 release_sock(&tsk->sk); 3359 rhashtable_walk_start(iter); 3360 sock_put(&tsk->sk); 3361 } 3362 rhashtable_walk_stop(iter); 3363 out: 3364 return skb->len; 3365 } 3366 EXPORT_SYMBOL(tipc_nl_sk_walk); 3367 3368 int tipc_dump_start(struct netlink_callback *cb) 3369 { 3370 return __tipc_dump_start(cb, sock_net(cb->skb->sk)); 3371 } 3372 EXPORT_SYMBOL(tipc_dump_start); 3373 3374 int __tipc_dump_start(struct netlink_callback *cb, struct net *net) 3375 { 3376 /* tipc_nl_name_table_dump() uses cb->args[0...3]. 
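	 * The socket walk therefore keeps its rhashtable_iter pointer in
	 * cb->args[4]: it is allocated here on the first dump pass,
	 * reused on later passes, and freed again in tipc_dump_done().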
*/ 3377 struct rhashtable_iter *iter = (void *)cb->args[4]; 3378 struct tipc_net *tn = tipc_net(net); 3379 3380 if (!iter) { 3381 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3382 if (!iter) 3383 return -ENOMEM; 3384 3385 cb->args[4] = (long)iter; 3386 } 3387 3388 rhashtable_walk_enter(&tn->sk_rht, iter); 3389 return 0; 3390 } 3391 3392 int tipc_dump_done(struct netlink_callback *cb) 3393 { 3394 struct rhashtable_iter *hti = (void *)cb->args[4]; 3395 3396 rhashtable_walk_exit(hti); 3397 kfree(hti); 3398 return 0; 3399 } 3400 EXPORT_SYMBOL(tipc_dump_done); 3401 3402 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3403 struct tipc_sock *tsk, u32 sk_filter_state, 3404 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3405 { 3406 struct sock *sk = &tsk->sk; 3407 struct nlattr *attrs; 3408 struct nlattr *stat; 3409 3410 /*filter response w.r.t sk_state*/ 3411 if (!(sk_filter_state & (1 << sk->sk_state))) 3412 return 0; 3413 3414 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3415 if (!attrs) 3416 goto msg_cancel; 3417 3418 if (__tipc_nl_add_sk_info(skb, tsk)) 3419 goto attr_msg_cancel; 3420 3421 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3422 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3423 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3424 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3425 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3426 sock_i_uid(sk))) || 3427 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3428 tipc_diag_gen_cookie(sk), 3429 TIPC_NLA_SOCK_PAD)) 3430 goto attr_msg_cancel; 3431 3432 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3433 if (!stat) 3434 goto attr_msg_cancel; 3435 3436 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3437 skb_queue_len(&sk->sk_receive_queue)) || 3438 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3439 skb_queue_len(&sk->sk_write_queue)) || 3440 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3441 atomic_read(&sk->sk_drops))) 3442 goto stat_msg_cancel; 3443 3444 if (tsk->cong_link_cnt && 3445 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3446 goto stat_msg_cancel; 3447 3448 if (tsk_conn_cong(tsk) && 3449 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3450 goto stat_msg_cancel; 3451 3452 nla_nest_end(skb, stat); 3453 3454 if (tsk->group) 3455 if (tipc_group_fill_sock_diag(tsk->group, skb)) 3456 goto stat_msg_cancel; 3457 3458 nla_nest_end(skb, attrs); 3459 3460 return 0; 3461 3462 stat_msg_cancel: 3463 nla_nest_cancel(skb, stat); 3464 attr_msg_cancel: 3465 nla_nest_cancel(skb, attrs); 3466 msg_cancel: 3467 return -EMSGSIZE; 3468 } 3469 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3470 3471 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3472 { 3473 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3474 } 3475 3476 /* Caller should hold socket lock for the passed tipc socket. 
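 * Publications are emitted in list order; if the dump skb fills up,
 * __tipc_nl_list_sk_publ() hands the key of the last publication it
 * managed to emit back through *last_publ (stashed in cb->args[1] by
 * tipc_nl_publ_dump()), so the next dump pass can resume right after it.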
*/ 3477 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3478 struct netlink_callback *cb, 3479 struct publication *publ) 3480 { 3481 void *hdr; 3482 struct nlattr *attrs; 3483 3484 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3485 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3486 if (!hdr) 3487 goto msg_cancel; 3488 3489 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3490 if (!attrs) 3491 goto genlmsg_cancel; 3492 3493 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3494 goto attr_msg_cancel; 3495 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3496 goto attr_msg_cancel; 3497 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3498 goto attr_msg_cancel; 3499 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3500 goto attr_msg_cancel; 3501 3502 nla_nest_end(skb, attrs); 3503 genlmsg_end(skb, hdr); 3504 3505 return 0; 3506 3507 attr_msg_cancel: 3508 nla_nest_cancel(skb, attrs); 3509 genlmsg_cancel: 3510 genlmsg_cancel(skb, hdr); 3511 msg_cancel: 3512 return -EMSGSIZE; 3513 } 3514 3515 /* Caller should hold socket lock for the passed tipc socket. */ 3516 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3517 struct netlink_callback *cb, 3518 struct tipc_sock *tsk, u32 *last_publ) 3519 { 3520 int err; 3521 struct publication *p; 3522 3523 if (*last_publ) { 3524 list_for_each_entry(p, &tsk->publications, binding_sock) { 3525 if (p->key == *last_publ) 3526 break; 3527 } 3528 if (p->key != *last_publ) { 3529 /* We never set seq or call nl_dump_check_consistent(), 3530 * which means that setting prev_seq here will cause the 3531 * consistency check to fail in the netlink callback 3532 * handler, resulting in the last NLMSG_DONE message 3533 * having the NLM_F_DUMP_INTR flag set. 3534 */ 3535 cb->prev_seq = 1; 3536 *last_publ = 0; 3537 return -EPIPE; 3538 } 3539 } else { 3540 p = list_first_entry(&tsk->publications, struct publication, 3541 binding_sock); 3542 } 3543 3544 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3545 err = __tipc_nl_add_sk_publ(skb, cb, p); 3546 if (err) { 3547 *last_publ = p->key; 3548 return err; 3549 } 3550 } 3551 *last_publ = 0; 3552 3553 return 0; 3554 } 3555 3556 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3557 { 3558 int err; 3559 u32 tsk_portid = cb->args[0]; 3560 u32 last_publ = cb->args[1]; 3561 u32 done = cb->args[2]; 3562 struct net *net = sock_net(skb->sk); 3563 struct tipc_sock *tsk; 3564 3565 if (!tsk_portid) { 3566 struct nlattr **attrs; 3567 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3568 3569 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3570 if (err) 3571 return err; 3572 3573 if (!attrs[TIPC_NLA_SOCK]) 3574 return -EINVAL; 3575 3576 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3577 attrs[TIPC_NLA_SOCK], 3578 tipc_nl_sock_policy, NULL); 3579 if (err) 3580 return err; 3581 3582 if (!sock[TIPC_NLA_SOCK_REF]) 3583 return -EINVAL; 3584 3585 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3586 } 3587 3588 if (done) 3589 return 0; 3590 3591 tsk = tipc_sk_lookup(net, tsk_portid); 3592 if (!tsk) 3593 return -EINVAL; 3594 3595 lock_sock(&tsk->sk); 3596 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3597 if (!err) 3598 done = 1; 3599 release_sock(&tsk->sk); 3600 sock_put(&tsk->sk); 3601 3602 cb->args[0] = tsk_portid; 3603 cb->args[1] = last_publ; 3604 cb->args[2] = done; 3605 3606 return skb->len; 3607 } 3608 3609 /** 3610 * tipc_sk_filtering - check if a socket should be traced 3611 * @sk: the socket to be examined 3612 * @sysctl_tipc_sk_filter[]: the
socket tuple for filtering, 3613 * (portid, sock type, name type, name lower, name upper) 3614 * 3615 * Returns true if the socket meets the socket tuple data 3616 * (value 0 = 'any') or when there is no tuple set (all = 0), 3617 * otherwise false 3618 */ 3619 bool tipc_sk_filtering(struct sock *sk) 3620 { 3621 struct tipc_sock *tsk; 3622 struct publication *p; 3623 u32 _port, _sktype, _type, _lower, _upper; 3624 u32 type = 0, lower = 0, upper = 0; 3625 3626 if (!sk) 3627 return true; 3628 3629 tsk = tipc_sk(sk); 3630 3631 _port = sysctl_tipc_sk_filter[0]; 3632 _sktype = sysctl_tipc_sk_filter[1]; 3633 _type = sysctl_tipc_sk_filter[2]; 3634 _lower = sysctl_tipc_sk_filter[3]; 3635 _upper = sysctl_tipc_sk_filter[4]; 3636 3637 if (!_port && !_sktype && !_type && !_lower && !_upper) 3638 return true; 3639 3640 if (_port) 3641 return (_port == tsk->portid); 3642 3643 if (_sktype && _sktype != sk->sk_type) 3644 return false; 3645 3646 if (tsk->published) { 3647 p = list_first_entry_or_null(&tsk->publications, 3648 struct publication, binding_sock); 3649 if (p) { 3650 type = p->type; 3651 lower = p->lower; 3652 upper = p->upper; 3653 } 3654 } 3655 3656 if (!tipc_sk_type_connectionless(sk)) { 3657 type = tsk->conn_type; 3658 lower = tsk->conn_instance; 3659 upper = tsk->conn_instance; 3660 } 3661 3662 if ((_type && _type != type) || (_lower && _lower != lower) || 3663 (_upper && _upper != upper)) 3664 return false; 3665 3666 return true; 3667 } 3668 3669 u32 tipc_sock_get_portid(struct sock *sk) 3670 { 3671 return (sk) ? (tipc_sk(sk))->portid : 0; 3672 } 3673 3674 /** 3675 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded, 3676 * both the rcv and backlog queues are considered 3677 * @sk: tipc sk to be checked 3678 * @skb: tipc msg to be checked 3679 * 3680 * Returns true if the socket rx queue allocation is > 90%, otherwise false 3681 */ 3682 3683 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb) 3684 { 3685 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt; 3686 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 3687 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk); 3688 3689 return (qsize > lim * 90 / 100); 3690 } 3691 3692 /** 3693 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded, 3694 * only the rcv queue is considered 3695 * @sk: tipc sk to be checked 3696 * @skb: tipc msg to be checked 3697 * 3698 * Returns true if the socket rx queue allocation is > 90%, otherwise false 3699 */ 3700 3701 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb) 3702 { 3703 unsigned int lim = rcvbuf_limit(sk, skb); 3704 unsigned int qsize = sk_rmem_alloc_get(sk); 3705 3706 return (qsize > lim * 90 / 100); 3707 } 3708 3709 /** 3710 * tipc_sk_dump - dump TIPC socket 3711 * @sk: tipc sk to be dumped 3712 * @dqueues: bitmask to decide if any socket queue to be dumped? 3713 * - TIPC_DUMP_NONE: don't dump socket queues 3714 * - TIPC_DUMP_SK_SNDQ: dump socket send queue 3715 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue 3716 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue 3717 * - TIPC_DUMP_ALL: dump all the socket queues above 3718 * @buf: returned buffer of dump data in format 3719 */ 3720 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf) 3721 { 3722 int i = 0; 3723 size_t sz = (dqueues) ? 
SK_LMAX : SK_LMIN; 3724 struct tipc_sock *tsk; 3725 struct publication *p; 3726 bool tsk_connected; 3727 3728 if (!sk) { 3729 i += scnprintf(buf, sz, "sk data: (null)\n"); 3730 return i; 3731 } 3732 3733 tsk = tipc_sk(sk); 3734 tsk_connected = !tipc_sk_type_connectionless(sk); 3735 3736 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type); 3737 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state); 3738 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk)); 3739 i += scnprintf(buf + i, sz - i, " %u", tsk->portid); 3740 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected); 3741 if (tsk_connected) { 3742 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk)); 3743 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk)); 3744 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type); 3745 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance); 3746 } 3747 i += scnprintf(buf + i, sz - i, " | %u", tsk->published); 3748 if (tsk->published) { 3749 p = list_first_entry_or_null(&tsk->publications, 3750 struct publication, binding_sock); 3751 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0); 3752 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0); 3753 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0); 3754 } 3755 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win); 3756 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win); 3757 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt); 3758 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps); 3759 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt); 3760 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked); 3761 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked); 3762 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt)); 3763 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown); 3764 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk)); 3765 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf); 3766 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk)); 3767 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf); 3768 i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len); 3769 3770 if (dqueues & TIPC_DUMP_SK_SNDQ) { 3771 i += scnprintf(buf + i, sz - i, "sk_write_queue: "); 3772 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i); 3773 } 3774 3775 if (dqueues & TIPC_DUMP_SK_RCVQ) { 3776 i += scnprintf(buf + i, sz - i, "sk_receive_queue: "); 3777 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i); 3778 } 3779 3780 if (dqueues & TIPC_DUMP_SK_BKLGQ) { 3781 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head "); 3782 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i); 3783 if (sk->sk_backlog.tail != sk->sk_backlog.head) { 3784 i += scnprintf(buf + i, sz - i, " tail "); 3785 i += tipc_skb_dump(sk->sk_backlog.tail, false, 3786 buf + i); 3787 } 3788 } 3789 3790 return i; 3791 } 3792
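/*
 * Editor's sketch: driving the trace filter above from user space,
 * assuming the tuple is exposed as the net.tipc.sk_filter sysctl in the
 * order (portid, sock type, name type, name lower, name upper), with
 * 0 meaning 'any' (all values here are hypothetical):
 *
 *	sysctl -w net.tipc.sk_filter="3892657371 0 0 0 0"   only that port
 *	sysctl -w net.tipc.sk_filter="0 0 4711 17 17"       one name instance
 *	sysctl -w net.tipc.sk_filter="0 0 0 0 0"            every socket
 */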