/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: a connection probe has been sent but not yet acked by peer
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size, in flow control blocks or messages
 * @peer_caps: capabilities advertised by the peer node
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size, in flow control blocks or messages
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: communication group this socket is a member of, if any
 * @group_is_open: send/receive to/from the group is currently permitted
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by the peer we
 *   fall back to message based flow control, i.e., each message
 *   counts as one unit
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
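/* Illustrative arithmetic (a sketch, not part of the build): assuming
 * FLOWCTL_BLK_SZ is 1024 bytes, a 1500-byte message counts as
 * tsk_inc() = 1500/1024 + 1 = 2 advertisable blocks, while a peer
 * without TIPC_BLOCK_FLOWCTL counts the same message as 1 unit.
 * A 64 KB receive buffer advertises tsk_adv_blocks(65536) =
 * 65536/1024/4 = 16 blocks, the divide-by-4 compensating for the
 * worst-case truesize(len)/len ratio mentioned above.
 */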
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			\
({									\
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			\
	struct sock *sk_;						\
	int rc_;							\
									\
	while ((rc_ = !(condition_))) {					\
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	\
		smp_rmb();						\
		sk_ = (sock_)->sk;					\
		rc_ = tipc_sk_sock_err((sock_), timeo_);		\
		if (rc_)						\
			break;						\
		add_wait_queue(sk_sleep(sk_), &wait_);			\
		release_sock(sk_);					\
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					\
		lock_sock(sk_);						\
		remove_wait_queue(sk_sleep(sk_), &wait_);		\
	}								\
	rc_;								\
})
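/* Usage sketch (illustrative only): callers hold the socket lock and
 * pass a pointer to their remaining timeout. The macro re-checks the
 * condition after every wakeup, drops the lock while sleeping, and
 * returns 0 or a negative errno from tipc_sk_sock_err(), e.g.:
 *
 *	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 *	int rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 */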
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
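/* From userspace, the socket types handled above map onto the usual
 * socket() call (illustrative sketch, not part of this file):
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);     // reliable datagram
 *	int st = socket(AF_TIPC, SOCK_STREAM, 0);  // byte stream
 *
 * Any protocol value other than 0 is rejected with EPROTONOSUPPORT.
 */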
/* Caller should hold socket lock for the socket.
 */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Remove any pending SYN message */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	__skb_queue_purge(&tsk->mc_method.deferredq);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
	      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
	      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
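/* Illustrative userspace counterpart (a sketch; the service type and
 * range values are made up): binding a name range is a bind() with a
 * positive scope, unbinding uses a negative scope.
 *
 *	struct sockaddr_tipc a = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *	};
 *	a.addr.nameseq.type  = 1000;
 *	a.addr.nameseq.lower = 0;
 *	a.addr.nameseq.upper = 99;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 */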
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns the size of the returned address on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table entry
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall through */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
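/* Illustrative userspace counterpart (a sketch; type/range values are
 * made up): a multicast send addresses an instance range rather than a
 * single port.
 *
 *	struct sockaddr_tipc a = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *	};
 *	a.addr.nameseq.type  = 1000;
 *	a.addr.nameseq.lower = 0;
 *	a.addr.nameseq.upper = 99;
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&a, sizeof(a));
 */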
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: transmitting socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
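/* Group membership itself is set up from userspace with the
 * TIPC_GROUP_JOIN socket option (illustrative sketch; the group type
 * and instance values are made up):
 *
 *	struct tipc_group_req req = {
 *		.type     = 4711,
 *		.instance = 17,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 * After a successful join, unaddressed sends on the socket take the
 * broadcast path above, while TIPC_ADDR_NAME and TIPC_ADDR_ID
 * destinations take the anycast and unicast paths respectively.
 */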
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection manager protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer
 * @inputq: queue for messages to be delivered to this socket
 * @xmitq: queue for messages to be transmitted by the caller
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn && dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
		msg_set_syn(hdr, 1);
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
		return -ENOMEM;

	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
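/* Illustrative userspace counterpart of the TIPC_ADDR_NAME branch above
 * (a sketch; the service values are made up). A domain of 0 requests a
 * cluster-wide name lookup:
 *
 *	struct sockaddr_tipc a = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *	};
 *	a.addr.name.name.type     = 1000;
 *	a.addr.name.name.instance = 42;
 *	a.addr.name.domain        = 0;
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&a, sizeof(a));
 */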
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	__skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}
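/* A worked example of the chunking in __tipc_sendstream() above
 * (numbers are illustrative): a 150000-byte send with
 * TIPC_MAX_USER_MSG_SIZE of 66000 goes out as chunks of
 * 66000 + 66000 + 18000 bytes, each chunk fragmented further to
 * tsk->max_pkt by tipc_msg_build(). Congestion between chunks makes
 * the call return the partial count 'sent' rather than an error.
 */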
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *msg;
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;
	msg = buf_msg(skb);

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			if (skb_linearize(skb))
				return -ENOMEM;
			msg = buf_msg(skb);
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
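/* Receiver-side sketch (illustrative only) of consuming the ancillary
 * data produced above: TIPC_DESTNAME carries three u32 values
 * {type, lower, upper}, TIPC_ERRINFO carries {error, returned data len}.
 *
 *	struct cmsghdr *cm;
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level == SOL_TIPC &&
 *		    cm->cmsg_type == TIPC_DESTNAME) {
 *			__u32 *name = (__u32 *)CMSG_DATA(cm);
 *			// name[0] = type, name[1..2] = instance range
 *		}
 *	}
 */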
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
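/* Worked example of the TIPC_ACK_RATE policy that drives this function
 * (numbers are illustrative): with a receive window of 512 blocks, the
 * receive paths below call tipc_sk_send_ack() once rcv_unacked reaches
 * 512 / TIPC_ACK_RATE = 128 blocks, i.e. one CONN_ACK per quarter
 * window rather than one per message.
 */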
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			add_wait_queue(sk_sleep(sk), &wait);
			release_sock(sk);
			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
			sched_annotate_sleep();
			lock_sock(sk);
			remove_wait_queue(sk_sleep(sk), &wait);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		__skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
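/* Illustrative userspace counterpart (a sketch): a plain recvfrom()
 * recovers the sending socket's address as filled in by
 * tipc_sk_set_orig_addr() above; group receivers that pass a buffer
 * large enough for two sockaddr_tipc entries also get the sending
 * member's name.
 *
 *	struct sockaddr_tipc src;
 *	socklen_t alen = sizeof(src);
 *	int n = recvfrom(sd, buf, sizeof(buf), 0,
 *			 (struct sockaddr *)&src, &alen);
 */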
/**
 * tipc_recvstream - receive stream-oriented data
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, skb, tsk);
			if (rc)
				break;
			hdr = buf_msg(skb);
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
copied : rc;
1933 }
1934
1935 /**
1936 * tipc_write_space - wake up thread if port congestion is released
1937 * @sk: socket
1938 */
1939 static void tipc_write_space(struct sock *sk)
1940 {
1941 struct socket_wq *wq;
1942
1943 rcu_read_lock();
1944 wq = rcu_dereference(sk->sk_wq);
1945 if (skwq_has_sleeper(wq))
1946 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1947 EPOLLWRNORM | EPOLLWRBAND);
1948 rcu_read_unlock();
1949 }
1950
1951 /**
1952 * tipc_data_ready - wake up threads to indicate messages have been received
1953 * @sk: socket
1955 */
1956 static void tipc_data_ready(struct sock *sk)
1957 {
1958 struct socket_wq *wq;
1959
1960 rcu_read_lock();
1961 wq = rcu_dereference(sk->sk_wq);
1962 if (skwq_has_sleeper(wq))
1963 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1964 EPOLLRDNORM | EPOLLRDBAND);
1965 rcu_read_unlock();
1966 }
1967
1968 static void tipc_sock_destruct(struct sock *sk)
1969 {
1970 __skb_queue_purge(&sk->sk_receive_queue);
1971 }
1972
1973 static void tipc_sk_proto_rcv(struct sock *sk,
1974 struct sk_buff_head *inputq,
1975 struct sk_buff_head *xmitq)
1976 {
1977 struct sk_buff *skb = __skb_dequeue(inputq);
1978 struct tipc_sock *tsk = tipc_sk(sk);
1979 struct tipc_msg *hdr = buf_msg(skb);
1980 struct tipc_group *grp = tsk->group;
1981 bool wakeup = false;
1982
1983 switch (msg_user(hdr)) {
1984 case CONN_MANAGER:
1985 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1986 return;
1987 case SOCK_WAKEUP:
1988 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1989 /* coupled with smp_rmb() in tipc_wait_for_cond() */
1990 smp_wmb();
1991 tsk->cong_link_cnt--;
1992 wakeup = true;
1993 break;
1994 case GROUP_PROTOCOL:
1995 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1996 break;
1997 case TOP_SRV:
1998 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1999 hdr, inputq, xmitq);
2000 break;
2001 default:
2002 break;
2003 }
2004
2005 if (wakeup)
2006 sk->sk_write_space(sk);
2007
2008 kfree_skb(skb);
2009 }
2010
2011 /**
2012 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2013 * @tsk: TIPC socket
2014 * @skb: pointer to message buffer.
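 *
 * The verdict depends on the socket state: in TIPC_CONNECTING a
 * matching ACK from the peer completes connection setup, while a
 * rejected SYN may trigger a delayed retransmit; in TIPC_LISTEN only
 * SYN messages are admitted; in TIPC_ESTABLISHED only messages from
 * the connected peer are accepted; TIPC_OPEN and TIPC_DISCONNECTING
 * accept nothing.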
2015 * Returns true if message should be added to receive queue, false otherwise
2016 */
2017 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2018 {
2019 struct sock *sk = &tsk->sk;
2020 struct net *net = sock_net(sk);
2021 struct tipc_msg *hdr = buf_msg(skb);
2022 bool con_msg = msg_connected(hdr);
2023 u32 pport = tsk_peer_port(tsk);
2024 u32 pnode = tsk_peer_node(tsk);
2025 u32 oport = msg_origport(hdr);
2026 u32 onode = msg_orignode(hdr);
2027 int err = msg_errcode(hdr);
2028 unsigned long delay;
2029
2030 if (unlikely(msg_mcast(hdr)))
2031 return false;
2032
2033 switch (sk->sk_state) {
2034 case TIPC_CONNECTING:
2035 /* Setup ACK */
2036 if (likely(con_msg)) {
2037 if (err)
2038 break;
2039 tipc_sk_finish_conn(tsk, oport, onode);
2040 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2041 /* ACK+ message with data is added to receive queue */
2042 if (msg_data_sz(hdr))
2043 return true;
2044 /* Empty ACK-: wake up sleeping connect() and drop */
2045 sk->sk_state_change(sk);
2046 msg_set_dest_droppable(hdr, 1);
2047 return false;
2048 }
2049 /* Ignore connectionless message if not from listening socket */
2050 if (oport != pport || onode != pnode)
2051 return false;
2052
2053 /* Rejected SYN */
2054 if (err != TIPC_ERR_OVERLOAD)
2055 break;
2056
2057 /* Prepare for new setup attempt if we have a SYN clone */
2058 if (skb_queue_empty(&sk->sk_write_queue))
2059 break;
2060 get_random_bytes(&delay, 2);
2061 delay %= (tsk->conn_timeout / 4);
2062 delay = msecs_to_jiffies(delay + 100);
2063 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2064 return false;
2065 case TIPC_OPEN:
2066 case TIPC_DISCONNECTING:
2067 return false;
2068 case TIPC_LISTEN:
2069 /* Accept only SYN message */
2070 if (!msg_is_syn(hdr) &&
2071 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2072 return false;
2073 if (!con_msg && !err)
2074 return true;
2075 return false;
2076 case TIPC_ESTABLISHED:
2077 /* Accept only connection-based messages sent by peer */
2078 if (likely(con_msg && !err && pport == oport && pnode == onode))
2079 return true;
2080 if (!tsk_peer_msg(tsk, hdr))
2081 return false;
2082 if (!err)
2083 return true;
2084 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2085 tipc_node_remove_conn(net, pnode, tsk->portid);
2086 sk->sk_state_change(sk);
2087 return true;
2088 default:
2089 pr_err("Unknown sk_state %u\n", sk->sk_state);
2090 }
2091 /* Abort connection setup attempt */
2092 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2093 sk->sk_err = ECONNREFUSED;
2094 sk->sk_state_change(sk);
2095 return true;
2096 }
2097
2098 /**
2099 * rcvbuf_limit - get proper overload limit of socket receive queue
2100 * @sk: socket
2101 * @skb: message
2102 *
2103 * For connection-oriented messages, irrespective of importance,
2104 * default queue limit is 2 MB.
2105 * 2106 * For connectionless messages, queue limits are based on message 2107 * importance as follows: 2108 * 2109 * TIPC_LOW_IMPORTANCE (2 MB) 2110 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2111 * TIPC_HIGH_IMPORTANCE (8 MB) 2112 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2113 * 2114 * Returns overload limit according to corresponding message importance 2115 */ 2116 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2117 { 2118 struct tipc_sock *tsk = tipc_sk(sk); 2119 struct tipc_msg *hdr = buf_msg(skb); 2120 2121 if (unlikely(msg_in_group(hdr))) 2122 return READ_ONCE(sk->sk_rcvbuf); 2123 2124 if (unlikely(!msg_connected(hdr))) 2125 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr); 2126 2127 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2128 return READ_ONCE(sk->sk_rcvbuf); 2129 2130 return FLOWCTL_MSG_LIM; 2131 } 2132 2133 /** 2134 * tipc_sk_filter_rcv - validate incoming message 2135 * @sk: socket 2136 * @skb: pointer to message. 2137 * 2138 * Enqueues message on receive queue if acceptable; optionally handles 2139 * disconnect indication for a connected socket. 2140 * 2141 * Called with socket lock already taken 2142 * 2143 */ 2144 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2145 struct sk_buff_head *xmitq) 2146 { 2147 bool sk_conn = !tipc_sk_type_connectionless(sk); 2148 struct tipc_sock *tsk = tipc_sk(sk); 2149 struct tipc_group *grp = tsk->group; 2150 struct tipc_msg *hdr = buf_msg(skb); 2151 struct net *net = sock_net(sk); 2152 struct sk_buff_head inputq; 2153 int mtyp = msg_type(hdr); 2154 int limit, err = TIPC_OK; 2155 2156 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " "); 2157 TIPC_SKB_CB(skb)->bytes_read = 0; 2158 __skb_queue_head_init(&inputq); 2159 __skb_queue_tail(&inputq, skb); 2160 2161 if (unlikely(!msg_isdata(hdr))) 2162 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2163 2164 if (unlikely(grp)) 2165 tipc_group_filter_msg(grp, &inputq, xmitq); 2166 2167 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG) 2168 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq); 2169 2170 /* Validate and add to receive buffer if there is space */ 2171 while ((skb = __skb_dequeue(&inputq))) { 2172 hdr = buf_msg(skb); 2173 limit = rcvbuf_limit(sk, skb); 2174 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2175 (!sk_conn && msg_connected(hdr)) || 2176 (!grp && msg_in_group(hdr))) 2177 err = TIPC_ERR_NO_PORT; 2178 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2179 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, 2180 "err_overload2!"); 2181 atomic_inc(&sk->sk_drops); 2182 err = TIPC_ERR_OVERLOAD; 2183 } 2184 2185 if (unlikely(err)) { 2186 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) { 2187 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, 2188 "@filter_rcv!"); 2189 __skb_queue_tail(xmitq, skb); 2190 } 2191 err = TIPC_OK; 2192 continue; 2193 } 2194 __skb_queue_tail(&sk->sk_receive_queue, skb); 2195 skb_set_owner_r(skb, sk); 2196 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL, 2197 "rcvq >90% allocated!"); 2198 sk->sk_data_ready(sk); 2199 } 2200 } 2201 2202 /** 2203 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2204 * @sk: socket 2205 * @skb: message 2206 * 2207 * Caller must hold socket lock 2208 */ 2209 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2210 { 2211 unsigned int before = sk_rmem_alloc_get(sk); 2212 struct sk_buff_head xmitq; 2213 unsigned int added; 2214 2215 __skb_queue_head_init(&xmitq); 2216 2217 tipc_sk_filter_rcv(sk, skb, &xmitq); 2218 added = sk_rmem_alloc_get(sk) - 
before; 2219 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2220 2221 /* Send pending response/rejected messages, if any */ 2222 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2223 return 0; 2224 } 2225 2226 /** 2227 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2228 * inputq and try adding them to socket or backlog queue 2229 * @inputq: list of incoming buffers with potentially different destinations 2230 * @sk: socket where the buffers should be enqueued 2231 * @dport: port number for the socket 2232 * 2233 * Caller must hold socket lock 2234 */ 2235 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2236 u32 dport, struct sk_buff_head *xmitq) 2237 { 2238 unsigned long time_limit = jiffies + 2; 2239 struct sk_buff *skb; 2240 unsigned int lim; 2241 atomic_t *dcnt; 2242 u32 onode; 2243 2244 while (skb_queue_len(inputq)) { 2245 if (unlikely(time_after_eq(jiffies, time_limit))) 2246 return; 2247 2248 skb = tipc_skb_dequeue(inputq, dport); 2249 if (unlikely(!skb)) 2250 return; 2251 2252 /* Add message directly to receive queue if possible */ 2253 if (!sock_owned_by_user(sk)) { 2254 tipc_sk_filter_rcv(sk, skb, xmitq); 2255 continue; 2256 } 2257 2258 /* Try backlog, compensating for double-counted bytes */ 2259 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2260 if (!sk->sk_backlog.len) 2261 atomic_set(dcnt, 0); 2262 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2263 if (likely(!sk_add_backlog(sk, skb, lim))) { 2264 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL, 2265 "bklg & rcvq >90% allocated!"); 2266 continue; 2267 } 2268 2269 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!"); 2270 /* Overload => reject message back to sender */ 2271 onode = tipc_own_addr(sock_net(sk)); 2272 atomic_inc(&sk->sk_drops); 2273 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) { 2274 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL, 2275 "@sk_enqueue!"); 2276 __skb_queue_tail(xmitq, skb); 2277 } 2278 break; 2279 } 2280 } 2281 2282 /** 2283 * tipc_sk_rcv - handle a chain of incoming buffers 2284 * @inputq: buffer list containing the buffers 2285 * Consumes all buffers in list until inputq is empty 2286 * Note: may be called in multiple threads referring to the same queue 2287 */ 2288 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2289 { 2290 struct sk_buff_head xmitq; 2291 u32 dnode, dport = 0; 2292 int err; 2293 struct tipc_sock *tsk; 2294 struct sock *sk; 2295 struct sk_buff *skb; 2296 2297 __skb_queue_head_init(&xmitq); 2298 while (skb_queue_len(inputq)) { 2299 dport = tipc_skb_peek_port(inputq, dport); 2300 tsk = tipc_sk_lookup(net, dport); 2301 2302 if (likely(tsk)) { 2303 sk = &tsk->sk; 2304 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2305 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2306 spin_unlock_bh(&sk->sk_lock.slock); 2307 } 2308 /* Send pending response/rejected messages, if any */ 2309 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2310 sock_put(sk); 2311 continue; 2312 } 2313 /* No destination socket => dequeue skb if still there */ 2314 skb = tipc_skb_dequeue(inputq, dport); 2315 if (!skb) 2316 return; 2317 2318 /* Try secondary lookup if unresolved named message */ 2319 err = TIPC_ERR_NO_PORT; 2320 if (tipc_msg_lookup_dest(net, skb, &err)) 2321 goto xmit; 2322 2323 /* Prepare for message rejection */ 2324 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2325 continue; 2326 2327 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!"); 2328 xmit: 2329 dnode = msg_destnode(buf_msg(skb)); 2330 tipc_node_xmit_skb(net, skb, dnode, 
dport);
2331 }
2332 }
2333
2334 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2335 {
2336 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2337 struct sock *sk = sock->sk;
2338 int done;
2339
2340 do {
2341 int err = sock_error(sk);
2342 if (err)
2343 return err;
2344 if (!*timeo_p)
2345 return -ETIMEDOUT;
2346 if (signal_pending(current))
2347 return sock_intr_errno(*timeo_p);
2348
2349 add_wait_queue(sk_sleep(sk), &wait);
2350 done = sk_wait_event(sk, timeo_p,
2351 sk->sk_state != TIPC_CONNECTING, &wait);
2352 remove_wait_queue(sk_sleep(sk), &wait);
2353 } while (!done);
2354 return 0;
2355 }
2356
2357 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2358 {
2359 if (addr->family != AF_TIPC)
2360 return false;
2361 if (addr->addrtype == TIPC_SERVICE_RANGE)
2362 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2363 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2364 addr->addrtype == TIPC_SOCKET_ADDR);
2365 }
2366
2367 /**
2368 * tipc_connect - establish a connection to another TIPC port
2369 * @sock: socket structure
2370 * @dest: socket address for destination port
2371 * @destlen: size of socket address data structure
2372 * @flags: file-related flags associated with socket
2373 *
2374 * Returns 0 on success, errno otherwise
2375 */
2376 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2377 int destlen, int flags)
2378 {
2379 struct sock *sk = sock->sk;
2380 struct tipc_sock *tsk = tipc_sk(sk);
2381 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2382 struct msghdr m = {NULL,};
2383 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2384 int previous;
2385 int res = 0;
2386
2387 if (destlen != sizeof(struct sockaddr_tipc))
2388 return -EINVAL;
2389
2390 lock_sock(sk);
2391
2392 if (tsk->group) {
2393 res = -EINVAL;
2394 goto exit;
2395 }
2396
2397 if (dst->family == AF_UNSPEC) {
2398 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2399 if (!tipc_sk_type_connectionless(sk))
2400 res = -EINVAL;
2401 goto exit;
2402 }
2403 if (!tipc_sockaddr_is_sane(dst)) {
2404 res = -EINVAL;
2405 goto exit;
2406 }
2407 /* DGRAM/RDM connect(), just save the destaddr */
2408 if (tipc_sk_type_connectionless(sk)) {
2409 memcpy(&tsk->peer, dest, destlen);
2410 goto exit;
2411 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2412 res = -EINVAL;
2413 goto exit;
2414 }
2415
2416 previous = sk->sk_state;
2417
2418 switch (sk->sk_state) {
2419 case TIPC_OPEN:
2420 /* Send a 'SYN-' to destination */
2421 m.msg_name = dest;
2422 m.msg_namelen = destlen;
2423
2424 /* If connect() is non-blocking, set MSG_DONTWAIT so that
2425 * __tipc_sendmsg() never blocks.
2426 */
2427 if (!timeout)
2428 m.msg_flags = MSG_DONTWAIT;
2429
2430 res = __tipc_sendmsg(sock, &m, 0);
2431 if ((res < 0) && (res != -EWOULDBLOCK))
2432 goto exit;
2433
2434 /* We have just entered TIPC_CONNECTING; the only
2435 * difference is that the return value in the non-blocking
2436 * case is -EINPROGRESS rather than -EALREADY.
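 *
 * An illustrative user-space sketch (not kernel code): a non-blocking
 * caller typically sees connect() fail with errno EINPROGRESS and then
 * waits for the setup to complete, e.g. via poll(). Here 'sd' is a
 * non-blocking AF_TIPC socket and 'sa' a filled-in struct sockaddr_tipc:
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLOUT };
 *
 *	if (connect(sd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
 *	    errno == EINPROGRESS)
 *		poll(&pfd, 1, 8000);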
2437 */
2438 res = -EINPROGRESS;
2439 /* fall through */
2440 case TIPC_CONNECTING:
2441 if (!timeout) {
2442 if (previous == TIPC_CONNECTING)
2443 res = -EALREADY;
2444 goto exit;
2445 }
2446 timeout = msecs_to_jiffies(timeout);
2447 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2448 res = tipc_wait_for_connect(sock, &timeout);
2449 break;
2450 case TIPC_ESTABLISHED:
2451 res = -EISCONN;
2452 break;
2453 default:
2454 res = -EINVAL;
2455 }
2456
2457 exit:
2458 release_sock(sk);
2459 return res;
2460 }
2461
2462 /**
2463 * tipc_listen - allow socket to listen for incoming connections
2464 * @sock: socket structure
2465 * @len: (unused)
2466 *
2467 * Returns 0 on success, errno otherwise
2468 */
2469 static int tipc_listen(struct socket *sock, int len)
2470 {
2471 struct sock *sk = sock->sk;
2472 int res;
2473
2474 lock_sock(sk);
2475 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2476 release_sock(sk);
2477
2478 return res;
2479 }
2480
2481 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2482 {
2483 struct sock *sk = sock->sk;
2484 DEFINE_WAIT(wait);
2485 int err;
2486
2487 /* True wake-one mechanism for incoming connections: only
2488 * one process gets woken up, not the 'whole herd'.
2489 * Since we do not 'race & poll' for established sockets
2490 * anymore, the common case will execute the loop only once.
2491 */
2492 for (;;) {
2493 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2494 TASK_INTERRUPTIBLE);
2495 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2496 release_sock(sk);
2497 timeo = schedule_timeout(timeo);
2498 lock_sock(sk);
2499 }
2500 err = 0;
2501 if (!skb_queue_empty(&sk->sk_receive_queue))
2502 break;
2503 err = -EAGAIN;
2504 if (!timeo)
2505 break;
2506 err = sock_intr_errno(timeo);
2507 if (signal_pending(current))
2508 break;
2509 }
2510 finish_wait(sk_sleep(sk), &wait);
2511 return err;
2512 }
2513
2514 /**
2515 * tipc_accept - wait for connection request
2516 * @sock: listening socket
2517 * @newsock: new socket that is to be connected
2518 * @flags: file-related flags associated with socket
2519 *
2520 * Returns 0 on success, errno otherwise
2521 */
2522 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2523 bool kern)
2524 {
2525 struct sock *new_sk, *sk = sock->sk;
2526 struct sk_buff *buf;
2527 struct tipc_sock *new_tsock;
2528 struct tipc_msg *msg;
2529 long timeo;
2530 int res;
2531
2532 lock_sock(sk);
2533
2534 if (sk->sk_state != TIPC_LISTEN) {
2535 res = -EINVAL;
2536 goto exit;
2537 }
2538 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2539 res = tipc_wait_for_accept(sock, timeo);
2540 if (res)
2541 goto exit;
2542
2543 buf = skb_peek(&sk->sk_receive_queue);
2544
2545 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2546 if (res)
2547 goto exit;
2548 security_sk_clone(sock->sk, new_sock->sk);
2549
2550 new_sk = new_sock->sk;
2551 new_tsock = tipc_sk(new_sk);
2552 msg = buf_msg(buf);
2553
2554 /* we lock on new_sk; but lockdep sees the lock on sk */
2555 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2556
2557 /*
2558 * Reject any stray messages received by new socket
2559 * before the socket lock was taken (very, very unlikely)
2560 */
2561 tsk_rej_rx_queue(new_sk);
2562
2563 /* Connect new socket to its peer */
2564 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2565
2566 tsk_set_importance(new_tsock, msg_importance(msg));
2567 if (msg_named(msg)) {
2568 new_tsock->conn_type = msg_nametype(msg);
2569 new_tsock->conn_instance = msg_nameinst(msg);
2570 }
2571
2572 /*
2573 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2574 * Respond to 'SYN+' by queuing it on new socket.
2575 */
2576 if (!msg_data_sz(msg)) {
2577 struct msghdr m = {NULL,};
2578
2579 tsk_advance_rx_queue(sk);
2580 __tipc_sendstream(new_sock, &m, 0);
2581 } else {
2582 __skb_dequeue(&sk->sk_receive_queue);
2583 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2584 skb_set_owner_r(buf, new_sk);
2585 }
2586 release_sock(new_sk);
2587 exit:
2588 release_sock(sk);
2589 return res;
2590 }
2591
2592 /**
2593 * tipc_shutdown - shutdown socket connection
2594 * @sock: socket structure
2595 * @how: direction to close (must be SHUT_RDWR)
2596 *
2597 * Terminates connection (if necessary), then purges socket's receive queue.
2598 *
2599 * Returns 0 on success, errno otherwise
2600 */
2601 static int tipc_shutdown(struct socket *sock, int how)
2602 {
2603 struct sock *sk = sock->sk;
2604 int res;
2605
2606 if (how != SHUT_RDWR)
2607 return -EINVAL;
2608
2609 lock_sock(sk);
2610
2611 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2612 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2613 sk->sk_shutdown = SEND_SHUTDOWN;
2614
2615 if (sk->sk_state == TIPC_DISCONNECTING) {
2616 /* Discard any unreceived messages */
2617 __skb_queue_purge(&sk->sk_receive_queue);
2618
2619 /* Wake up anyone sleeping in poll */
2620 sk->sk_state_change(sk);
2621 res = 0;
2622 } else {
2623 res = -ENOTCONN;
2624 }
2625
2626 release_sock(sk);
2627 return res;
2628 }
2629
2630 static void tipc_sk_check_probing_state(struct sock *sk,
2631 struct sk_buff_head *list)
2632 {
2633 struct tipc_sock *tsk = tipc_sk(sk);
2634 u32 pnode = tsk_peer_node(tsk);
2635 u32 pport = tsk_peer_port(tsk);
2636 u32 self = tsk_own_node(tsk);
2637 u32 oport = tsk->portid;
2638 struct sk_buff *skb;
2639
2640 if (tsk->probe_unacked) {
2641 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2642 sk->sk_err = ECONNABORTED;
2643 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2644 sk->sk_state_change(sk);
2645 return;
2646 }
2647 /* Prepare new probe */
2648 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2649 pnode, self, pport, oport, TIPC_OK);
2650 if (skb)
2651 __skb_queue_tail(list, skb);
2652 tsk->probe_unacked = true;
2653 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2654 }
2655
2656 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2657 {
2658 struct tipc_sock *tsk = tipc_sk(sk);
2659
2660 /* Try again later if dest link is congested */
2661 if (tsk->cong_link_cnt) {
2662 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2663 return;
2664 }
2665 /* Prepare SYN for retransmit */
2666 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2667 }
2668
2669 static void tipc_sk_timeout(struct timer_list *t)
2670 {
2671 struct sock *sk = from_timer(sk, t, sk_timer);
2672 struct tipc_sock *tsk = tipc_sk(sk);
2673 u32 pnode = tsk_peer_node(tsk);
2674 struct sk_buff_head list;
2675 int rc = 0;
2676
2677 __skb_queue_head_init(&list);
2678 bh_lock_sock(sk);
2679
2680 /* Try again later if socket is busy */
2681 if (sock_owned_by_user(sk)) {
2682 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2683 bh_unlock_sock(sk);
2684 return;
2685 }
2686
2687 if (sk->sk_state == TIPC_ESTABLISHED)
2688 tipc_sk_check_probing_state(sk, &list);
2689 else if (sk->sk_state == TIPC_CONNECTING)
2690 tipc_sk_retry_connect(sk, &list);
2691
2692 bh_unlock_sock(sk);
2693
2694 if (!skb_queue_empty(&list))
2695 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2696
2697 /* SYN
messages may cause link congestion */ 2698 if (rc == -ELINKCONG) { 2699 tipc_dest_push(&tsk->cong_links, pnode, 0); 2700 tsk->cong_link_cnt = 1; 2701 } 2702 sock_put(sk); 2703 } 2704 2705 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2706 struct tipc_name_seq const *seq) 2707 { 2708 struct sock *sk = &tsk->sk; 2709 struct net *net = sock_net(sk); 2710 struct publication *publ; 2711 u32 key; 2712 2713 if (scope != TIPC_NODE_SCOPE) 2714 scope = TIPC_CLUSTER_SCOPE; 2715 2716 if (tipc_sk_connected(sk)) 2717 return -EINVAL; 2718 key = tsk->portid + tsk->pub_count + 1; 2719 if (key == tsk->portid) 2720 return -EADDRINUSE; 2721 2722 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2723 scope, tsk->portid, key); 2724 if (unlikely(!publ)) 2725 return -EINVAL; 2726 2727 list_add(&publ->binding_sock, &tsk->publications); 2728 tsk->pub_count++; 2729 tsk->published = 1; 2730 return 0; 2731 } 2732 2733 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2734 struct tipc_name_seq const *seq) 2735 { 2736 struct net *net = sock_net(&tsk->sk); 2737 struct publication *publ; 2738 struct publication *safe; 2739 int rc = -EINVAL; 2740 2741 if (scope != TIPC_NODE_SCOPE) 2742 scope = TIPC_CLUSTER_SCOPE; 2743 2744 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2745 if (seq) { 2746 if (publ->scope != scope) 2747 continue; 2748 if (publ->type != seq->type) 2749 continue; 2750 if (publ->lower != seq->lower) 2751 continue; 2752 if (publ->upper != seq->upper) 2753 break; 2754 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2755 publ->upper, publ->key); 2756 rc = 0; 2757 break; 2758 } 2759 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2760 publ->upper, publ->key); 2761 rc = 0; 2762 } 2763 if (list_empty(&tsk->publications)) 2764 tsk->published = 0; 2765 return rc; 2766 } 2767 2768 /* tipc_sk_reinit: set non-zero address in all existing sockets 2769 * when we go from standalone to network mode. 
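 * The walk below runs under the rhashtable iterator; if the table is
 * resized while we walk it, rhashtable_walk_next() returns
 * ERR_PTR(-EAGAIN) and the walk is restarted, so a socket may be
 * visited more than once. That is safe here because rewriting the
 * header address fields is idempotent.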
2770 */ 2771 void tipc_sk_reinit(struct net *net) 2772 { 2773 struct tipc_net *tn = net_generic(net, tipc_net_id); 2774 struct rhashtable_iter iter; 2775 struct tipc_sock *tsk; 2776 struct tipc_msg *msg; 2777 2778 rhashtable_walk_enter(&tn->sk_rht, &iter); 2779 2780 do { 2781 rhashtable_walk_start(&iter); 2782 2783 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2784 sock_hold(&tsk->sk); 2785 rhashtable_walk_stop(&iter); 2786 lock_sock(&tsk->sk); 2787 msg = &tsk->phdr; 2788 msg_set_prevnode(msg, tipc_own_addr(net)); 2789 msg_set_orignode(msg, tipc_own_addr(net)); 2790 release_sock(&tsk->sk); 2791 rhashtable_walk_start(&iter); 2792 sock_put(&tsk->sk); 2793 } 2794 2795 rhashtable_walk_stop(&iter); 2796 } while (tsk == ERR_PTR(-EAGAIN)); 2797 2798 rhashtable_walk_exit(&iter); 2799 } 2800 2801 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2802 { 2803 struct tipc_net *tn = net_generic(net, tipc_net_id); 2804 struct tipc_sock *tsk; 2805 2806 rcu_read_lock(); 2807 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2808 if (tsk) 2809 sock_hold(&tsk->sk); 2810 rcu_read_unlock(); 2811 2812 return tsk; 2813 } 2814 2815 static int tipc_sk_insert(struct tipc_sock *tsk) 2816 { 2817 struct sock *sk = &tsk->sk; 2818 struct net *net = sock_net(sk); 2819 struct tipc_net *tn = net_generic(net, tipc_net_id); 2820 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2821 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2822 2823 while (remaining--) { 2824 portid++; 2825 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2826 portid = TIPC_MIN_PORT; 2827 tsk->portid = portid; 2828 sock_hold(&tsk->sk); 2829 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2830 tsk_rht_params)) 2831 return 0; 2832 sock_put(&tsk->sk); 2833 } 2834 2835 return -1; 2836 } 2837 2838 static void tipc_sk_remove(struct tipc_sock *tsk) 2839 { 2840 struct sock *sk = &tsk->sk; 2841 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2842 2843 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2844 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2845 __sock_put(sk); 2846 } 2847 } 2848 2849 static const struct rhashtable_params tsk_rht_params = { 2850 .nelem_hint = 192, 2851 .head_offset = offsetof(struct tipc_sock, node), 2852 .key_offset = offsetof(struct tipc_sock, portid), 2853 .key_len = sizeof(u32), /* portid */ 2854 .max_size = 1048576, 2855 .min_size = 256, 2856 .automatic_shrinking = true, 2857 }; 2858 2859 int tipc_sk_rht_init(struct net *net) 2860 { 2861 struct tipc_net *tn = net_generic(net, tipc_net_id); 2862 2863 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2864 } 2865 2866 void tipc_sk_rht_destroy(struct net *net) 2867 { 2868 struct tipc_net *tn = net_generic(net, tipc_net_id); 2869 2870 /* Wait for socket readers to complete */ 2871 synchronize_net(); 2872 2873 rhashtable_destroy(&tn->sk_rht); 2874 } 2875 2876 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2877 { 2878 struct net *net = sock_net(&tsk->sk); 2879 struct tipc_group *grp = tsk->group; 2880 struct tipc_msg *hdr = &tsk->phdr; 2881 struct tipc_name_seq seq; 2882 int rc; 2883 2884 if (mreq->type < TIPC_RESERVED_TYPES) 2885 return -EACCES; 2886 if (mreq->scope > TIPC_NODE_SCOPE) 2887 return -EINVAL; 2888 if (grp) 2889 return -EACCES; 2890 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2891 if (!grp) 2892 return -ENOMEM; 2893 tsk->group = grp; 2894 msg_set_lookup_scope(hdr, mreq->scope); 2895 
msg_set_nametype(hdr, mreq->type); 2896 msg_set_dest_droppable(hdr, true); 2897 seq.type = mreq->type; 2898 seq.lower = mreq->instance; 2899 seq.upper = seq.lower; 2900 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2901 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2902 if (rc) { 2903 tipc_group_delete(net, grp); 2904 tsk->group = NULL; 2905 return rc; 2906 } 2907 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2908 tsk->mc_method.rcast = true; 2909 tsk->mc_method.mandatory = true; 2910 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2911 return rc; 2912 } 2913 2914 static int tipc_sk_leave(struct tipc_sock *tsk) 2915 { 2916 struct net *net = sock_net(&tsk->sk); 2917 struct tipc_group *grp = tsk->group; 2918 struct tipc_name_seq seq; 2919 int scope; 2920 2921 if (!grp) 2922 return -EINVAL; 2923 tipc_group_self(grp, &seq, &scope); 2924 tipc_group_delete(net, grp); 2925 tsk->group = NULL; 2926 tipc_sk_withdraw(tsk, scope, &seq); 2927 return 0; 2928 } 2929 2930 /** 2931 * tipc_setsockopt - set socket option 2932 * @sock: socket structure 2933 * @lvl: option level 2934 * @opt: option identifier 2935 * @ov: pointer to new option value 2936 * @ol: length of option value 2937 * 2938 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2939 * (to ease compatibility). 2940 * 2941 * Returns 0 on success, errno otherwise 2942 */ 2943 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2944 char __user *ov, unsigned int ol) 2945 { 2946 struct sock *sk = sock->sk; 2947 struct tipc_sock *tsk = tipc_sk(sk); 2948 struct tipc_group_req mreq; 2949 u32 value = 0; 2950 int res = 0; 2951 2952 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2953 return 0; 2954 if (lvl != SOL_TIPC) 2955 return -ENOPROTOOPT; 2956 2957 switch (opt) { 2958 case TIPC_IMPORTANCE: 2959 case TIPC_SRC_DROPPABLE: 2960 case TIPC_DEST_DROPPABLE: 2961 case TIPC_CONN_TIMEOUT: 2962 if (ol < sizeof(value)) 2963 return -EINVAL; 2964 if (get_user(value, (u32 __user *)ov)) 2965 return -EFAULT; 2966 break; 2967 case TIPC_GROUP_JOIN: 2968 if (ol < sizeof(mreq)) 2969 return -EINVAL; 2970 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2971 return -EFAULT; 2972 break; 2973 default: 2974 if (ov || ol) 2975 return -EINVAL; 2976 } 2977 2978 lock_sock(sk); 2979 2980 switch (opt) { 2981 case TIPC_IMPORTANCE: 2982 res = tsk_set_importance(tsk, value); 2983 break; 2984 case TIPC_SRC_DROPPABLE: 2985 if (sock->type != SOCK_STREAM) 2986 tsk_set_unreliable(tsk, value); 2987 else 2988 res = -ENOPROTOOPT; 2989 break; 2990 case TIPC_DEST_DROPPABLE: 2991 tsk_set_unreturnable(tsk, value); 2992 break; 2993 case TIPC_CONN_TIMEOUT: 2994 tipc_sk(sk)->conn_timeout = value; 2995 break; 2996 case TIPC_MCAST_BROADCAST: 2997 tsk->mc_method.rcast = false; 2998 tsk->mc_method.mandatory = true; 2999 break; 3000 case TIPC_MCAST_REPLICAST: 3001 tsk->mc_method.rcast = true; 3002 tsk->mc_method.mandatory = true; 3003 break; 3004 case TIPC_GROUP_JOIN: 3005 res = tipc_sk_join(tsk, &mreq); 3006 break; 3007 case TIPC_GROUP_LEAVE: 3008 res = tipc_sk_leave(tsk); 3009 break; 3010 default: 3011 res = -EINVAL; 3012 } 3013 3014 release_sock(sk); 3015 3016 return res; 3017 } 3018 3019 /** 3020 * tipc_getsockopt - get socket option 3021 * @sock: socket structure 3022 * @lvl: option level 3023 * @opt: option identifier 3024 * @ov: receptacle for option value 3025 * @ol: receptacle for length of option value 3026 * 3027 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 3028 * (to ease compatibility). 
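 *
 * An illustrative user-space sketch (not part of this file): reading
 * the number of bytes currently allocated to the receive queue of a
 * TIPC socket 'sd':
 *
 *	__u32 used;
 *	socklen_t len = sizeof(used);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_USED, &used, &len))
 *		printf("%u bytes in receive queue\n", used);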
3029 * 3030 * Returns 0 on success, errno otherwise 3031 */ 3032 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 3033 char __user *ov, int __user *ol) 3034 { 3035 struct sock *sk = sock->sk; 3036 struct tipc_sock *tsk = tipc_sk(sk); 3037 struct tipc_name_seq seq; 3038 int len, scope; 3039 u32 value; 3040 int res; 3041 3042 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 3043 return put_user(0, ol); 3044 if (lvl != SOL_TIPC) 3045 return -ENOPROTOOPT; 3046 res = get_user(len, ol); 3047 if (res) 3048 return res; 3049 3050 lock_sock(sk); 3051 3052 switch (opt) { 3053 case TIPC_IMPORTANCE: 3054 value = tsk_importance(tsk); 3055 break; 3056 case TIPC_SRC_DROPPABLE: 3057 value = tsk_unreliable(tsk); 3058 break; 3059 case TIPC_DEST_DROPPABLE: 3060 value = tsk_unreturnable(tsk); 3061 break; 3062 case TIPC_CONN_TIMEOUT: 3063 value = tsk->conn_timeout; 3064 /* no need to set "res", since already 0 at this point */ 3065 break; 3066 case TIPC_NODE_RECVQ_DEPTH: 3067 value = 0; /* was tipc_queue_size, now obsolete */ 3068 break; 3069 case TIPC_SOCK_RECVQ_DEPTH: 3070 value = skb_queue_len(&sk->sk_receive_queue); 3071 break; 3072 case TIPC_SOCK_RECVQ_USED: 3073 value = sk_rmem_alloc_get(sk); 3074 break; 3075 case TIPC_GROUP_JOIN: 3076 seq.type = 0; 3077 if (tsk->group) 3078 tipc_group_self(tsk->group, &seq, &scope); 3079 value = seq.type; 3080 break; 3081 default: 3082 res = -EINVAL; 3083 } 3084 3085 release_sock(sk); 3086 3087 if (res) 3088 return res; /* "get" failed */ 3089 3090 if (len < sizeof(value)) 3091 return -EINVAL; 3092 3093 if (copy_to_user(ov, &value, sizeof(value))) 3094 return -EFAULT; 3095 3096 return put_user(sizeof(value), ol); 3097 } 3098 3099 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3100 { 3101 struct net *net = sock_net(sock->sk); 3102 struct tipc_sioc_nodeid_req nr = {0}; 3103 struct tipc_sioc_ln_req lnr; 3104 void __user *argp = (void __user *)arg; 3105 3106 switch (cmd) { 3107 case SIOCGETLINKNAME: 3108 if (copy_from_user(&lnr, argp, sizeof(lnr))) 3109 return -EFAULT; 3110 if (!tipc_node_get_linkname(net, 3111 lnr.bearer_id & 0xffff, lnr.peer, 3112 lnr.linkname, TIPC_MAX_LINK_NAME)) { 3113 if (copy_to_user(argp, &lnr, sizeof(lnr))) 3114 return -EFAULT; 3115 return 0; 3116 } 3117 return -EADDRNOTAVAIL; 3118 case SIOCGETNODEID: 3119 if (copy_from_user(&nr, argp, sizeof(nr))) 3120 return -EFAULT; 3121 if (!tipc_node_get_id(net, nr.peer, nr.node_id)) 3122 return -EADDRNOTAVAIL; 3123 if (copy_to_user(argp, &nr, sizeof(nr))) 3124 return -EFAULT; 3125 return 0; 3126 default: 3127 return -ENOIOCTLCMD; 3128 } 3129 } 3130 3131 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 3132 { 3133 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3134 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3135 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3136 3137 tsk1->peer.family = AF_TIPC; 3138 tsk1->peer.addrtype = TIPC_ADDR_ID; 3139 tsk1->peer.scope = TIPC_NODE_SCOPE; 3140 tsk1->peer.addr.id.ref = tsk2->portid; 3141 tsk1->peer.addr.id.node = onode; 3142 tsk2->peer.family = AF_TIPC; 3143 tsk2->peer.addrtype = TIPC_ADDR_ID; 3144 tsk2->peer.scope = TIPC_NODE_SCOPE; 3145 tsk2->peer.addr.id.ref = tsk1->portid; 3146 tsk2->peer.addr.id.node = onode; 3147 3148 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3149 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3150 return 0; 3151 } 3152 3153 /* Protocol switches for the various types of TIPC sockets */ 3154 3155 static const struct proto_ops msg_ops = { 3156 .owner = THIS_MODULE, 
3157 .family = AF_TIPC, 3158 .release = tipc_release, 3159 .bind = tipc_bind, 3160 .connect = tipc_connect, 3161 .socketpair = tipc_socketpair, 3162 .accept = sock_no_accept, 3163 .getname = tipc_getname, 3164 .poll = tipc_poll, 3165 .ioctl = tipc_ioctl, 3166 .listen = sock_no_listen, 3167 .shutdown = tipc_shutdown, 3168 .setsockopt = tipc_setsockopt, 3169 .getsockopt = tipc_getsockopt, 3170 .sendmsg = tipc_sendmsg, 3171 .recvmsg = tipc_recvmsg, 3172 .mmap = sock_no_mmap, 3173 .sendpage = sock_no_sendpage 3174 }; 3175 3176 static const struct proto_ops packet_ops = { 3177 .owner = THIS_MODULE, 3178 .family = AF_TIPC, 3179 .release = tipc_release, 3180 .bind = tipc_bind, 3181 .connect = tipc_connect, 3182 .socketpair = tipc_socketpair, 3183 .accept = tipc_accept, 3184 .getname = tipc_getname, 3185 .poll = tipc_poll, 3186 .ioctl = tipc_ioctl, 3187 .listen = tipc_listen, 3188 .shutdown = tipc_shutdown, 3189 .setsockopt = tipc_setsockopt, 3190 .getsockopt = tipc_getsockopt, 3191 .sendmsg = tipc_send_packet, 3192 .recvmsg = tipc_recvmsg, 3193 .mmap = sock_no_mmap, 3194 .sendpage = sock_no_sendpage 3195 }; 3196 3197 static const struct proto_ops stream_ops = { 3198 .owner = THIS_MODULE, 3199 .family = AF_TIPC, 3200 .release = tipc_release, 3201 .bind = tipc_bind, 3202 .connect = tipc_connect, 3203 .socketpair = tipc_socketpair, 3204 .accept = tipc_accept, 3205 .getname = tipc_getname, 3206 .poll = tipc_poll, 3207 .ioctl = tipc_ioctl, 3208 .listen = tipc_listen, 3209 .shutdown = tipc_shutdown, 3210 .setsockopt = tipc_setsockopt, 3211 .getsockopt = tipc_getsockopt, 3212 .sendmsg = tipc_sendstream, 3213 .recvmsg = tipc_recvstream, 3214 .mmap = sock_no_mmap, 3215 .sendpage = sock_no_sendpage 3216 }; 3217 3218 static const struct net_proto_family tipc_family_ops = { 3219 .owner = THIS_MODULE, 3220 .family = AF_TIPC, 3221 .create = tipc_sk_create 3222 }; 3223 3224 static struct proto tipc_proto = { 3225 .name = "TIPC", 3226 .owner = THIS_MODULE, 3227 .obj_size = sizeof(struct tipc_sock), 3228 .sysctl_rmem = sysctl_tipc_rmem 3229 }; 3230 3231 /** 3232 * tipc_socket_init - initialize TIPC socket interface 3233 * 3234 * Returns 0 on success, errno otherwise 3235 */ 3236 int tipc_socket_init(void) 3237 { 3238 int res; 3239 3240 res = proto_register(&tipc_proto, 1); 3241 if (res) { 3242 pr_err("Failed to register TIPC protocol type\n"); 3243 goto out; 3244 } 3245 3246 res = sock_register(&tipc_family_ops); 3247 if (res) { 3248 pr_err("Failed to register TIPC socket type\n"); 3249 proto_unregister(&tipc_proto); 3250 goto out; 3251 } 3252 out: 3253 return res; 3254 } 3255 3256 /** 3257 * tipc_socket_stop - stop TIPC socket interface 3258 */ 3259 void tipc_socket_stop(void) 3260 { 3261 sock_unregister(tipc_family_ops.family); 3262 proto_unregister(&tipc_proto); 3263 } 3264 3265 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3266 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3267 { 3268 u32 peer_node; 3269 u32 peer_port; 3270 struct nlattr *nest; 3271 3272 peer_node = tsk_peer_node(tsk); 3273 peer_port = tsk_peer_port(tsk); 3274 3275 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON); 3276 if (!nest) 3277 return -EMSGSIZE; 3278 3279 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3280 goto msg_full; 3281 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3282 goto msg_full; 3283 3284 if (tsk->conn_type != 0) { 3285 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3286 goto msg_full; 3287 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3288 goto msg_full; 3289 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3290 goto msg_full; 3291 } 3292 nla_nest_end(skb, nest); 3293 3294 return 0; 3295 3296 msg_full: 3297 nla_nest_cancel(skb, nest); 3298 3299 return -EMSGSIZE; 3300 } 3301 3302 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3303 *tsk) 3304 { 3305 struct net *net = sock_net(skb->sk); 3306 struct sock *sk = &tsk->sk; 3307 3308 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3309 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3310 return -EMSGSIZE; 3311 3312 if (tipc_sk_connected(sk)) { 3313 if (__tipc_nl_add_sk_con(skb, tsk)) 3314 return -EMSGSIZE; 3315 } else if (!list_empty(&tsk->publications)) { 3316 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3317 return -EMSGSIZE; 3318 } 3319 return 0; 3320 } 3321 3322 /* Caller should hold socket lock for the passed tipc socket. */ 3323 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3324 struct tipc_sock *tsk) 3325 { 3326 struct nlattr *attrs; 3327 void *hdr; 3328 3329 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3330 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3331 if (!hdr) 3332 goto msg_cancel; 3333 3334 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK); 3335 if (!attrs) 3336 goto genlmsg_cancel; 3337 3338 if (__tipc_nl_add_sk_info(skb, tsk)) 3339 goto attr_msg_cancel; 3340 3341 nla_nest_end(skb, attrs); 3342 genlmsg_end(skb, hdr); 3343 3344 return 0; 3345 3346 attr_msg_cancel: 3347 nla_nest_cancel(skb, attrs); 3348 genlmsg_cancel: 3349 genlmsg_cancel(skb, hdr); 3350 msg_cancel: 3351 return -EMSGSIZE; 3352 } 3353 3354 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3355 int (*skb_handler)(struct sk_buff *skb, 3356 struct netlink_callback *cb, 3357 struct tipc_sock *tsk)) 3358 { 3359 struct rhashtable_iter *iter = (void *)cb->args[4]; 3360 struct tipc_sock *tsk; 3361 int err; 3362 3363 rhashtable_walk_start(iter); 3364 while ((tsk = rhashtable_walk_next(iter)) != NULL) { 3365 if (IS_ERR(tsk)) { 3366 err = PTR_ERR(tsk); 3367 if (err == -EAGAIN) { 3368 err = 0; 3369 continue; 3370 } 3371 break; 3372 } 3373 3374 sock_hold(&tsk->sk); 3375 rhashtable_walk_stop(iter); 3376 lock_sock(&tsk->sk); 3377 err = skb_handler(skb, cb, tsk); 3378 if (err) { 3379 release_sock(&tsk->sk); 3380 sock_put(&tsk->sk); 3381 goto out; 3382 } 3383 release_sock(&tsk->sk); 3384 rhashtable_walk_start(iter); 3385 sock_put(&tsk->sk); 3386 } 3387 rhashtable_walk_stop(iter); 3388 out: 3389 return skb->len; 3390 } 3391 EXPORT_SYMBOL(tipc_nl_sk_walk); 3392 3393 int tipc_dump_start(struct netlink_callback *cb) 3394 { 3395 return __tipc_dump_start(cb, sock_net(cb->skb->sk)); 3396 } 3397 EXPORT_SYMBOL(tipc_dump_start); 3398 3399 int __tipc_dump_start(struct netlink_callback *cb, struct net *net) 3400 { 3401 /* 
tipc_nl_name_table_dump() uses cb->args[0...3]. */ 3402 struct rhashtable_iter *iter = (void *)cb->args[4]; 3403 struct tipc_net *tn = tipc_net(net); 3404 3405 if (!iter) { 3406 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3407 if (!iter) 3408 return -ENOMEM; 3409 3410 cb->args[4] = (long)iter; 3411 } 3412 3413 rhashtable_walk_enter(&tn->sk_rht, iter); 3414 return 0; 3415 } 3416 3417 int tipc_dump_done(struct netlink_callback *cb) 3418 { 3419 struct rhashtable_iter *hti = (void *)cb->args[4]; 3420 3421 rhashtable_walk_exit(hti); 3422 kfree(hti); 3423 return 0; 3424 } 3425 EXPORT_SYMBOL(tipc_dump_done); 3426 3427 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3428 struct tipc_sock *tsk, u32 sk_filter_state, 3429 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3430 { 3431 struct sock *sk = &tsk->sk; 3432 struct nlattr *attrs; 3433 struct nlattr *stat; 3434 3435 /*filter response w.r.t sk_state*/ 3436 if (!(sk_filter_state & (1 << sk->sk_state))) 3437 return 0; 3438 3439 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK); 3440 if (!attrs) 3441 goto msg_cancel; 3442 3443 if (__tipc_nl_add_sk_info(skb, tsk)) 3444 goto attr_msg_cancel; 3445 3446 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3447 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3448 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3449 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3450 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3451 sock_i_uid(sk))) || 3452 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3453 tipc_diag_gen_cookie(sk), 3454 TIPC_NLA_SOCK_PAD)) 3455 goto attr_msg_cancel; 3456 3457 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT); 3458 if (!stat) 3459 goto attr_msg_cancel; 3460 3461 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3462 skb_queue_len(&sk->sk_receive_queue)) || 3463 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3464 skb_queue_len(&sk->sk_write_queue)) || 3465 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3466 atomic_read(&sk->sk_drops))) 3467 goto stat_msg_cancel; 3468 3469 if (tsk->cong_link_cnt && 3470 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3471 goto stat_msg_cancel; 3472 3473 if (tsk_conn_cong(tsk) && 3474 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3475 goto stat_msg_cancel; 3476 3477 nla_nest_end(skb, stat); 3478 3479 if (tsk->group) 3480 if (tipc_group_fill_sock_diag(tsk->group, skb)) 3481 goto stat_msg_cancel; 3482 3483 nla_nest_end(skb, attrs); 3484 3485 return 0; 3486 3487 stat_msg_cancel: 3488 nla_nest_cancel(skb, stat); 3489 attr_msg_cancel: 3490 nla_nest_cancel(skb, attrs); 3491 msg_cancel: 3492 return -EMSGSIZE; 3493 } 3494 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3495 3496 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3497 { 3498 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3499 } 3500 3501 /* Caller should hold socket lock for the passed tipc socket. 
*/
3502 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3503 struct netlink_callback *cb,
3504 struct publication *publ)
3505 {
3506 void *hdr;
3507 struct nlattr *attrs;
3508
3509 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3510 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3511 if (!hdr)
3512 goto msg_cancel;
3513
3514 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3515 if (!attrs)
3516 goto genlmsg_cancel;
3517
3518 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3519 goto attr_msg_cancel;
3520 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3521 goto attr_msg_cancel;
3522 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3523 goto attr_msg_cancel;
3524 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3525 goto attr_msg_cancel;
3526
3527 nla_nest_end(skb, attrs);
3528 genlmsg_end(skb, hdr);
3529
3530 return 0;
3531
3532 attr_msg_cancel:
3533 nla_nest_cancel(skb, attrs);
3534 genlmsg_cancel:
3535 genlmsg_cancel(skb, hdr);
3536 msg_cancel:
3537 return -EMSGSIZE;
3538 }
3539
3540 /* Caller should hold socket lock for the passed tipc socket. */
3541 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3542 struct netlink_callback *cb,
3543 struct tipc_sock *tsk, u32 *last_publ)
3544 {
3545 int err;
3546 struct publication *p;
3547
3548 if (*last_publ) {
3549 list_for_each_entry(p, &tsk->publications, binding_sock) {
3550 if (p->key == *last_publ)
3551 break;
3552 }
3553 if (p->key != *last_publ) {
3554 /* We never set seq or call nl_dump_check_consistent(),
3555 * which means that setting prev_seq here will cause the
3556 * consistency check to fail in the netlink callback
3557 * handler, resulting in the last NLMSG_DONE message
3558 * having the NLM_F_DUMP_INTR flag set.
3559 */
3560 cb->prev_seq = 1;
3561 *last_publ = 0;
3562 return -EPIPE;
3563 }
3564 } else {
3565 p = list_first_entry(&tsk->publications, struct publication,
3566 binding_sock);
3567 }
3568
3569 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3570 err = __tipc_nl_add_sk_publ(skb, cb, p);
3571 if (err) {
3572 *last_publ = p->key;
3573 return err;
3574 }
3575 }
3576 *last_publ = 0;
3577
3578 return 0;
3579 }
3580
3581 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3582 {
3583 int err;
3584 u32 tsk_portid = cb->args[0];
3585 u32 last_publ = cb->args[1];
3586 u32 done = cb->args[2];
3587 struct net *net = sock_net(skb->sk);
3588 struct tipc_sock *tsk;
3589
3590 if (!tsk_portid) {
3591 struct nlattr **attrs;
3592 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3593
3594 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3595 if (err)
3596 return err;
3597
3598 if (!attrs[TIPC_NLA_SOCK])
3599 return -EINVAL;
3600
3601 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3602 attrs[TIPC_NLA_SOCK],
3603 tipc_nl_sock_policy, NULL);
3604 if (err)
3605 return err;
3606
3607 if (!sock[TIPC_NLA_SOCK_REF])
3608 return -EINVAL;
3609
3610 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3611 }
3612
3613 if (done)
3614 return 0;
3615
3616 tsk = tipc_sk_lookup(net, tsk_portid);
3617 if (!tsk)
3618 return -EINVAL;
3619
3620 lock_sock(&tsk->sk);
3621 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3622 if (!err)
3623 done = 1;
3624 release_sock(&tsk->sk);
3625 sock_put(&tsk->sk);
3626
3627 cb->args[0] = tsk_portid;
3628 cb->args[1] = last_publ;
3629 cb->args[2] = done;
3630
3631 return skb->len;
3632 }
3633
3634 /**
3635 * tipc_sk_filtering - check if a socket should be traced
3636 * @sk: the socket to be examined
3637 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3638 * (portid, sock type, name type, name lower, name upper)
3639 *
3640 * Returns true if the socket matches the tuple (a field value of
3641 * 0 means 'any'), or when no tuple is set at all (all fields 0);
3642 * otherwise false
3643 */
3644 bool tipc_sk_filtering(struct sock *sk)
3645 {
3646 struct tipc_sock *tsk;
3647 struct publication *p;
3648 u32 _port, _sktype, _type, _lower, _upper;
3649 u32 type = 0, lower = 0, upper = 0;
3650
3651 if (!sk)
3652 return true;
3653
3654 tsk = tipc_sk(sk);
3655
3656 _port = sysctl_tipc_sk_filter[0];
3657 _sktype = sysctl_tipc_sk_filter[1];
3658 _type = sysctl_tipc_sk_filter[2];
3659 _lower = sysctl_tipc_sk_filter[3];
3660 _upper = sysctl_tipc_sk_filter[4];
3661
3662 if (!_port && !_sktype && !_type && !_lower && !_upper)
3663 return true;
3664
3665 if (_port)
3666 return (_port == tsk->portid);
3667
3668 if (_sktype && _sktype != sk->sk_type)
3669 return false;
3670
3671 if (tsk->published) {
3672 p = list_first_entry_or_null(&tsk->publications,
3673 struct publication, binding_sock);
3674 if (p) {
3675 type = p->type;
3676 lower = p->lower;
3677 upper = p->upper;
3678 }
3679 }
3680
3681 if (!tipc_sk_type_connectionless(sk)) {
3682 type = tsk->conn_type;
3683 lower = tsk->conn_instance;
3684 upper = tsk->conn_instance;
3685 }
3686
3687 if ((_type && _type != type) || (_lower && _lower != lower) ||
3688 (_upper && _upper != upper))
3689 return false;
3690
3691 return true;
3692 }
3693
3694 u32 tipc_sock_get_portid(struct sock *sk)
3695 {
3696 return (sk) ? (tipc_sk(sk))->portid : 0;
3697 }
3698
3699 /**
3700 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3701 * both the rcv and backlog queues are considered
3702 * @sk: tipc sk to be checked
3703 * @skb: tipc msg to be checked
3704 *
3705 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3706 */
3707
3708 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3709 {
3710 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3711 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3712 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3713
3714 return (qsize > lim * 90 / 100);
3715 }
3716
3717 /**
3718 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3719 * only the rcv queue is considered
3720 * @sk: tipc sk to be checked
3721 * @skb: tipc msg to be checked
3722 *
3723 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3724 */
3725
3726 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3727 {
3728 unsigned int lim = rcvbuf_limit(sk, skb);
3729 unsigned int qsize = sk_rmem_alloc_get(sk);
3730
3731 return (qsize > lim * 90 / 100);
3732 }
3733
3734 /**
3735 * tipc_sk_dump - dump TIPC socket
3736 * @sk: tipc sk to be dumped
3737 * @dqueues: bitmask selecting which socket queues to dump:
3738 * - TIPC_DUMP_NONE: don't dump socket queues
3739 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3740 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3741 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3742 * - TIPC_DUMP_ALL: dump all the socket queues above
3743 * @buf: returned buffer of formatted dump data
3744 */
3745 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3746 {
3747 int i = 0;
3748 size_t sz = (dqueues) ?
SK_LMAX : SK_LMIN; 3749 struct tipc_sock *tsk; 3750 struct publication *p; 3751 bool tsk_connected; 3752 3753 if (!sk) { 3754 i += scnprintf(buf, sz, "sk data: (null)\n"); 3755 return i; 3756 } 3757 3758 tsk = tipc_sk(sk); 3759 tsk_connected = !tipc_sk_type_connectionless(sk); 3760 3761 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type); 3762 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state); 3763 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk)); 3764 i += scnprintf(buf + i, sz - i, " %u", tsk->portid); 3765 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected); 3766 if (tsk_connected) { 3767 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk)); 3768 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk)); 3769 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type); 3770 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance); 3771 } 3772 i += scnprintf(buf + i, sz - i, " | %u", tsk->published); 3773 if (tsk->published) { 3774 p = list_first_entry_or_null(&tsk->publications, 3775 struct publication, binding_sock); 3776 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0); 3777 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0); 3778 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0); 3779 } 3780 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win); 3781 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win); 3782 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt); 3783 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps); 3784 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt); 3785 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked); 3786 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked); 3787 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt)); 3788 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown); 3789 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk)); 3790 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf); 3791 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk)); 3792 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf); 3793 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len)); 3794 3795 if (dqueues & TIPC_DUMP_SK_SNDQ) { 3796 i += scnprintf(buf + i, sz - i, "sk_write_queue: "); 3797 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i); 3798 } 3799 3800 if (dqueues & TIPC_DUMP_SK_RCVQ) { 3801 i += scnprintf(buf + i, sz - i, "sk_receive_queue: "); 3802 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i); 3803 } 3804 3805 if (dqueues & TIPC_DUMP_SK_BKLGQ) { 3806 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head "); 3807 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i); 3808 if (sk->sk_backlog.tail != sk->sk_backlog.head) { 3809 i += scnprintf(buf + i, sz - i, " tail "); 3810 i += tipc_skb_dump(sk->sk_backlog.tail, false, 3811 buf + i); 3812 } 3813 } 3814 3815 return i; 3816 } 3817
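/*
 * An illustrative in-kernel sketch (hypothetical call site, not part of
 * this file): tipc_sk_dump() writes into a caller-provided buffer, which
 * must be SK_LMAX bytes when any queue dump is requested:
 *
 *	char buf[SK_LMAX];
 *
 *	tipc_sk_dump(sk, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */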