1 /* 2 * net/tipc/socket.c: TIPC socket API 3 * 4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: connection probe sent but not yet acknowledged by peer
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size, as advertised by the peer
 * @peer_caps: capability bitmap of the peer node
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size advertised to the peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: communication group this socket is a member of, if any
 * @group_is_open: socket is currently accepting messages from its group
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock
*tipc_sk_lookup(struct net *net, u32 portid); 135 static int tipc_sk_insert(struct tipc_sock *tsk); 136 static void tipc_sk_remove(struct tipc_sock *tsk); 137 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); 138 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); 139 140 static const struct proto_ops packet_ops; 141 static const struct proto_ops stream_ops; 142 static const struct proto_ops msg_ops; 143 static struct proto tipc_proto; 144 static const struct rhashtable_params tsk_rht_params; 145 146 static u32 tsk_own_node(struct tipc_sock *tsk) 147 { 148 return msg_prevnode(&tsk->phdr); 149 } 150 151 static u32 tsk_peer_node(struct tipc_sock *tsk) 152 { 153 return msg_destnode(&tsk->phdr); 154 } 155 156 static u32 tsk_peer_port(struct tipc_sock *tsk) 157 { 158 return msg_destport(&tsk->phdr); 159 } 160 161 static bool tsk_unreliable(struct tipc_sock *tsk) 162 { 163 return msg_src_droppable(&tsk->phdr) != 0; 164 } 165 166 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) 167 { 168 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); 169 } 170 171 static bool tsk_unreturnable(struct tipc_sock *tsk) 172 { 173 return msg_dest_droppable(&tsk->phdr) != 0; 174 } 175 176 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) 177 { 178 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0); 179 } 180 181 static int tsk_importance(struct tipc_sock *tsk) 182 { 183 return msg_importance(&tsk->phdr); 184 } 185 186 static int tsk_set_importance(struct tipc_sock *tsk, int imp) 187 { 188 if (imp > TIPC_CRITICAL_IMPORTANCE) 189 return -EINVAL; 190 msg_set_importance(&tsk->phdr, (u32)imp); 191 return 0; 192 } 193 194 static struct tipc_sock *tipc_sk(const struct sock *sk) 195 { 196 return container_of(sk, struct tipc_sock, sk); 197 } 198 199 static bool tsk_conn_cong(struct tipc_sock *tsk) 200 { 201 return tsk->snt_unacked > tsk->snd_win; 202 } 203 204 static u16 tsk_blocks(int len) 205 { 206 return ((len / FLOWCTL_BLK_SZ) + 1); 207 } 208 209 /* tsk_blocks(): translate a buffer size in bytes to number of 210 * advertisable blocks, taking into account the ratio truesize(len)/len 211 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ 212 */ 213 static u16 tsk_adv_blocks(int len) 214 { 215 return len / FLOWCTL_BLK_SZ / 4; 216 } 217 218 /* tsk_inc(): increment counter for sent or received data 219 * - If block based flow control is not supported by peer we 220 * fall back to message based ditto, incrementing the counter 221 */ 222 static u16 tsk_inc(struct tipc_sock *tsk, int msglen) 223 { 224 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 225 return ((msglen / FLOWCTL_BLK_SZ) + 1); 226 return 1; 227 } 228 229 /** 230 * tsk_advance_rx_queue - discard first buffer in socket receive queue 231 * 232 * Caller must hold socket lock 233 */ 234 static void tsk_advance_rx_queue(struct sock *sk) 235 { 236 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 237 } 238 239 /* tipc_sk_respond() : send response message back to sender 240 */ 241 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) 242 { 243 u32 selector; 244 u32 dnode; 245 u32 onode = tipc_own_addr(sock_net(sk)); 246 247 if (!tipc_msg_reverse(onode, &skb, err)) 248 return; 249 250 dnode = msg_destnode(buf_msg(skb)); 251 selector = msg_origport(buf_msg(skb)); 252 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 253 } 254 255 /** 256 * tsk_rej_rx_queue - reject all buffers in socket receive queue 257 * 258 * 
Caller must hold socket lock 259 */ 260 static void tsk_rej_rx_queue(struct sock *sk) 261 { 262 struct sk_buff *skb; 263 264 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 265 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 266 } 267 268 static bool tipc_sk_connected(struct sock *sk) 269 { 270 return sk->sk_state == TIPC_ESTABLISHED; 271 } 272 273 /* tipc_sk_type_connectionless - check if the socket is datagram socket 274 * @sk: socket 275 * 276 * Returns true if connection less, false otherwise 277 */ 278 static bool tipc_sk_type_connectionless(struct sock *sk) 279 { 280 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; 281 } 282 283 /* tsk_peer_msg - verify if message was sent by connected port's peer 284 * 285 * Handles cases where the node's network address has changed from 286 * the default of <0.0.0> to its configured setting. 287 */ 288 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 289 { 290 struct sock *sk = &tsk->sk; 291 u32 self = tipc_own_addr(sock_net(sk)); 292 u32 peer_port = tsk_peer_port(tsk); 293 u32 orig_node, peer_node; 294 295 if (unlikely(!tipc_sk_connected(sk))) 296 return false; 297 298 if (unlikely(msg_origport(msg) != peer_port)) 299 return false; 300 301 orig_node = msg_orignode(msg); 302 peer_node = tsk_peer_node(tsk); 303 304 if (likely(orig_node == peer_node)) 305 return true; 306 307 if (!orig_node && peer_node == self) 308 return true; 309 310 if (!peer_node && orig_node == self) 311 return true; 312 313 return false; 314 } 315 316 /* tipc_set_sk_state - set the sk_state of the socket 317 * @sk: socket 318 * 319 * Caller must hold socket lock 320 * 321 * Returns 0 on success, errno otherwise 322 */ 323 static int tipc_set_sk_state(struct sock *sk, int state) 324 { 325 int oldsk_state = sk->sk_state; 326 int res = -EINVAL; 327 328 switch (state) { 329 case TIPC_OPEN: 330 res = 0; 331 break; 332 case TIPC_LISTEN: 333 case TIPC_CONNECTING: 334 if (oldsk_state == TIPC_OPEN) 335 res = 0; 336 break; 337 case TIPC_ESTABLISHED: 338 if (oldsk_state == TIPC_CONNECTING || 339 oldsk_state == TIPC_OPEN) 340 res = 0; 341 break; 342 case TIPC_DISCONNECTING: 343 if (oldsk_state == TIPC_CONNECTING || 344 oldsk_state == TIPC_ESTABLISHED) 345 res = 0; 346 break; 347 } 348 349 if (!res) 350 sk->sk_state = state; 351 352 return res; 353 } 354 355 static int tipc_sk_sock_err(struct socket *sock, long *timeout) 356 { 357 struct sock *sk = sock->sk; 358 int err = sock_error(sk); 359 int typ = sock->type; 360 361 if (err) 362 return err; 363 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { 364 if (sk->sk_state == TIPC_DISCONNECTING) 365 return -EPIPE; 366 else if (!tipc_sk_connected(sk)) 367 return -ENOTCONN; 368 } 369 if (!*timeout) 370 return -EAGAIN; 371 if (signal_pending(current)) 372 return sock_intr_errno(*timeout); 373 374 return 0; 375 } 376 377 #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 378 ({ \ 379 struct sock *sk_; \ 380 int rc_; \ 381 \ 382 while ((rc_ = !(condition_))) { \ 383 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 384 sk_ = (sock_)->sk; \ 385 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 386 if (rc_) \ 387 break; \ 388 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 389 release_sock(sk_); \ 390 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 391 sched_annotate_sleep(); \ 392 lock_sock(sk_); \ 393 remove_wait_queue(sk_sleep(sk_), &wait_); \ 394 } \ 395 rc_; \ 396 }) 397 398 /** 399 * tipc_sk_create - create a TIPC socket 400 * @net: network namespace (must be default network) 401 * 
@sock: pre-allocated socket structure 402 * @protocol: protocol indicator (must be 0) 403 * @kern: caused by kernel or by userspace? 404 * 405 * This routine creates additional data structures used by the TIPC socket, 406 * initializes them, and links them together. 407 * 408 * Returns 0 on success, errno otherwise 409 */ 410 static int tipc_sk_create(struct net *net, struct socket *sock, 411 int protocol, int kern) 412 { 413 const struct proto_ops *ops; 414 struct sock *sk; 415 struct tipc_sock *tsk; 416 struct tipc_msg *msg; 417 418 /* Validate arguments */ 419 if (unlikely(protocol != 0)) 420 return -EPROTONOSUPPORT; 421 422 switch (sock->type) { 423 case SOCK_STREAM: 424 ops = &stream_ops; 425 break; 426 case SOCK_SEQPACKET: 427 ops = &packet_ops; 428 break; 429 case SOCK_DGRAM: 430 case SOCK_RDM: 431 ops = &msg_ops; 432 break; 433 default: 434 return -EPROTOTYPE; 435 } 436 437 /* Allocate socket's protocol area */ 438 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 439 if (sk == NULL) 440 return -ENOMEM; 441 442 tsk = tipc_sk(sk); 443 tsk->max_pkt = MAX_PKT_DEFAULT; 444 INIT_LIST_HEAD(&tsk->publications); 445 INIT_LIST_HEAD(&tsk->cong_links); 446 msg = &tsk->phdr; 447 448 /* Finish initializing socket data structures */ 449 sock->ops = ops; 450 sock_init_data(sock, sk); 451 tipc_set_sk_state(sk, TIPC_OPEN); 452 if (tipc_sk_insert(tsk)) { 453 pr_warn("Socket create failed; port number exhausted\n"); 454 return -EINVAL; 455 } 456 457 /* Ensure tsk is visible before we read own_addr. */ 458 smp_mb(); 459 460 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE, 461 TIPC_NAMED_MSG, NAMED_H_SIZE, 0); 462 463 msg_set_origport(msg, tsk->portid); 464 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); 465 sk->sk_shutdown = 0; 466 sk->sk_backlog_rcv = tipc_sk_backlog_rcv; 467 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 468 sk->sk_data_ready = tipc_data_ready; 469 sk->sk_write_space = tipc_write_space; 470 sk->sk_destruct = tipc_sock_destruct; 471 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 472 tsk->group_is_open = true; 473 atomic_set(&tsk->dupl_rcvcnt, 0); 474 475 /* Start out with safe limits until we receive an advertised window */ 476 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); 477 tsk->rcv_win = tsk->snd_win; 478 479 if (tipc_sk_type_connectionless(sk)) { 480 tsk_set_unreturnable(tsk, true); 481 if (sock->type == SOCK_DGRAM) 482 tsk_set_unreliable(tsk, true); 483 } 484 485 return 0; 486 } 487 488 static void tipc_sk_callback(struct rcu_head *head) 489 { 490 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 491 492 sock_put(&tsk->sk); 493 } 494 495 /* Caller should hold socket lock for the socket. */ 496 static void __tipc_shutdown(struct socket *sock, int error) 497 { 498 struct sock *sk = sock->sk; 499 struct tipc_sock *tsk = tipc_sk(sk); 500 struct net *net = sock_net(sk); 501 long timeout = CONN_TIMEOUT_DEFAULT; 502 u32 dnode = tsk_peer_node(tsk); 503 struct sk_buff *skb; 504 505 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ 506 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 507 !tsk_conn_cong(tsk))); 508 509 /* Remove any pending SYN message */ 510 __skb_queue_purge(&sk->sk_write_queue); 511 512 /* Reject all unreceived messages, except on an active connection 513 * (which disconnects locally & sends a 'FIN+' to peer). 
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine takes the socket lock, since it creates or removes
 * publications that are linked to the socket.
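 *
 * A hypothetical user space sketch (not part of this file) of binding a
 * name sequence and later withdrawing it via a negative scope:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 10 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	- publish the names
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	- withdraw them again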
606 */ 607 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, 608 int uaddr_len) 609 { 610 struct sock *sk = sock->sk; 611 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 612 struct tipc_sock *tsk = tipc_sk(sk); 613 int res = -EINVAL; 614 615 lock_sock(sk); 616 if (unlikely(!uaddr_len)) { 617 res = tipc_sk_withdraw(tsk, 0, NULL); 618 goto exit; 619 } 620 if (tsk->group) { 621 res = -EACCES; 622 goto exit; 623 } 624 if (uaddr_len < sizeof(struct sockaddr_tipc)) { 625 res = -EINVAL; 626 goto exit; 627 } 628 if (addr->family != AF_TIPC) { 629 res = -EAFNOSUPPORT; 630 goto exit; 631 } 632 633 if (addr->addrtype == TIPC_ADDR_NAME) 634 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 635 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 636 res = -EAFNOSUPPORT; 637 goto exit; 638 } 639 640 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 641 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 642 (addr->addr.nameseq.type != TIPC_CFG_SRV)) { 643 res = -EACCES; 644 goto exit; 645 } 646 647 res = (addr->scope >= 0) ? 648 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : 649 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); 650 exit: 651 release_sock(sk); 652 return res; 653 } 654 655 /** 656 * tipc_getname - get port ID of socket or peer socket 657 * @sock: socket structure 658 * @uaddr: area for returned socket address 659 * @uaddr_len: area for returned length of socket address 660 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID 661 * 662 * Returns 0 on success, errno otherwise 663 * 664 * NOTE: This routine doesn't need to take the socket lock since it only 665 * accesses socket information that is unchanging (or which changes in 666 * a completely predictable manner). 667 */ 668 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, 669 int peer) 670 { 671 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 672 struct sock *sk = sock->sk; 673 struct tipc_sock *tsk = tipc_sk(sk); 674 675 memset(addr, 0, sizeof(*addr)); 676 if (peer) { 677 if ((!tipc_sk_connected(sk)) && 678 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING))) 679 return -ENOTCONN; 680 addr->addr.id.ref = tsk_peer_port(tsk); 681 addr->addr.id.node = tsk_peer_node(tsk); 682 } else { 683 addr->addr.id.ref = tsk->portid; 684 addr->addr.id.node = tipc_own_addr(sock_net(sk)); 685 } 686 687 addr->addrtype = TIPC_ADDR_ID; 688 addr->family = AF_TIPC; 689 addr->scope = 0; 690 addr->addr.name.domain = 0; 691 692 return sizeof(*addr); 693 } 694 695 /** 696 * tipc_poll - read and possibly block on pollmask 697 * @file: file structure associated with the socket 698 * @sock: socket for which to calculate the poll bits 699 * @wait: ??? 700 * 701 * Returns pollmask value 702 * 703 * COMMENTARY: 704 * It appears that the usual socket locking mechanisms are not useful here 705 * since the pollmask info is potentially out-of-date the moment this routine 706 * exits. TCP and other protocols seem to rely on higher level poll routines 707 * to handle any preventable race conditions, so TIPC will do the same ... 708 * 709 * IMPORTANT: The fact that a read or write operation is indicated does NOT 710 * imply that the operation will succeed, merely that it should be performed 711 * and will not block. 
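 *
 * As a rough summary of the state handling below: EPOLLOUT is reported when
 * neither the relevant links nor the connection are congested, and
 * EPOLLIN/EPOLLRDNORM when the receive queue is non-empty or the socket is
 * disconnecting.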
712 */ 713 static __poll_t tipc_poll(struct file *file, struct socket *sock, 714 poll_table *wait) 715 { 716 struct sock *sk = sock->sk; 717 struct tipc_sock *tsk = tipc_sk(sk); 718 __poll_t revents = 0; 719 720 sock_poll_wait(file, wait); 721 722 if (sk->sk_shutdown & RCV_SHUTDOWN) 723 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 724 if (sk->sk_shutdown == SHUTDOWN_MASK) 725 revents |= EPOLLHUP; 726 727 switch (sk->sk_state) { 728 case TIPC_ESTABLISHED: 729 case TIPC_CONNECTING: 730 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 731 revents |= EPOLLOUT; 732 /* fall thru' */ 733 case TIPC_LISTEN: 734 if (!skb_queue_empty(&sk->sk_receive_queue)) 735 revents |= EPOLLIN | EPOLLRDNORM; 736 break; 737 case TIPC_OPEN: 738 if (tsk->group_is_open && !tsk->cong_link_cnt) 739 revents |= EPOLLOUT; 740 if (!tipc_sk_type_connectionless(sk)) 741 break; 742 if (skb_queue_empty(&sk->sk_receive_queue)) 743 break; 744 revents |= EPOLLIN | EPOLLRDNORM; 745 break; 746 case TIPC_DISCONNECTING: 747 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 748 break; 749 } 750 return revents; 751 } 752 753 /** 754 * tipc_sendmcast - send multicast message 755 * @sock: socket structure 756 * @seq: destination address 757 * @msg: message to send 758 * @dlen: length of data to send 759 * @timeout: timeout to wait for wakeup 760 * 761 * Called from function tipc_sendmsg(), which has done all sanity checks 762 * Returns the number of bytes sent on success, or errno 763 */ 764 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 765 struct msghdr *msg, size_t dlen, long timeout) 766 { 767 struct sock *sk = sock->sk; 768 struct tipc_sock *tsk = tipc_sk(sk); 769 struct tipc_msg *hdr = &tsk->phdr; 770 struct net *net = sock_net(sk); 771 int mtu = tipc_bcast_get_mtu(net); 772 struct tipc_mc_method *method = &tsk->mc_method; 773 struct sk_buff_head pkts; 774 struct tipc_nlist dsts; 775 int rc; 776 777 if (tsk->group) 778 return -EACCES; 779 780 /* Block or return if any destination link is congested */ 781 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); 782 if (unlikely(rc)) 783 return rc; 784 785 /* Lookup destination nodes */ 786 tipc_nlist_init(&dsts, tipc_own_addr(net)); 787 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, 788 seq->upper, &dsts); 789 if (!dsts.local && !dsts.remote) 790 return -EHOSTUNREACH; 791 792 /* Build message header */ 793 msg_set_type(hdr, TIPC_MCAST_MSG); 794 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 795 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 796 msg_set_destport(hdr, 0); 797 msg_set_destnode(hdr, 0); 798 msg_set_nametype(hdr, seq->type); 799 msg_set_namelower(hdr, seq->lower); 800 msg_set_nameupper(hdr, seq->upper); 801 802 /* Build message as chain of buffers */ 803 skb_queue_head_init(&pkts); 804 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 805 806 /* Send message if build was successful */ 807 if (unlikely(rc == dlen)) 808 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 809 &tsk->cong_link_cnt); 810 811 tipc_nlist_purge(&dsts); 812 813 return rc ? 
rc : dlen; 814 } 815 816 /** 817 * tipc_send_group_msg - send a message to a member in the group 818 * @net: network namespace 819 * @m: message to send 820 * @mb: group member 821 * @dnode: destination node 822 * @dport: destination port 823 * @dlen: total length of message data 824 */ 825 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 826 struct msghdr *m, struct tipc_member *mb, 827 u32 dnode, u32 dport, int dlen) 828 { 829 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 830 struct tipc_mc_method *method = &tsk->mc_method; 831 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 832 struct tipc_msg *hdr = &tsk->phdr; 833 struct sk_buff_head pkts; 834 int mtu, rc; 835 836 /* Complete message header */ 837 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 838 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 839 msg_set_destport(hdr, dport); 840 msg_set_destnode(hdr, dnode); 841 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 842 843 /* Build message as chain of buffers */ 844 skb_queue_head_init(&pkts); 845 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 846 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 847 if (unlikely(rc != dlen)) 848 return rc; 849 850 /* Send message */ 851 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 852 if (unlikely(rc == -ELINKCONG)) { 853 tipc_dest_push(&tsk->cong_links, dnode, 0); 854 tsk->cong_link_cnt++; 855 } 856 857 /* Update send window */ 858 tipc_group_update_member(mb, blks); 859 860 /* A broadcast sent within next EXPIRE period must follow same path */ 861 method->rcast = true; 862 method->mandatory = true; 863 return dlen; 864 } 865 866 /** 867 * tipc_send_group_unicast - send message to a member in the group 868 * @sock: socket structure 869 * @m: message to send 870 * @dlen: total length of message data 871 * @timeout: timeout to wait for wakeup 872 * 873 * Called from function tipc_sendmsg(), which has done all sanity checks 874 * Returns the number of bytes sent on success, or errno 875 */ 876 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 877 int dlen, long timeout) 878 { 879 struct sock *sk = sock->sk; 880 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 881 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 882 struct tipc_sock *tsk = tipc_sk(sk); 883 struct tipc_group *grp = tsk->group; 884 struct net *net = sock_net(sk); 885 struct tipc_member *mb = NULL; 886 u32 node, port; 887 int rc; 888 889 node = dest->addr.id.node; 890 port = dest->addr.id.ref; 891 if (!port && !node) 892 return -EHOSTUNREACH; 893 894 /* Block or return if destination link or member is congested */ 895 rc = tipc_wait_for_cond(sock, &timeout, 896 !tipc_dest_find(&tsk->cong_links, node, 0) && 897 !tipc_group_cong(grp, node, port, blks, &mb)); 898 if (unlikely(rc)) 899 return rc; 900 901 if (unlikely(!mb)) 902 return -EHOSTUNREACH; 903 904 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 905 906 return rc ? 
rc : dlen; 907 } 908 909 /** 910 * tipc_send_group_anycast - send message to any member with given identity 911 * @sock: socket structure 912 * @m: message to send 913 * @dlen: total length of message data 914 * @timeout: timeout to wait for wakeup 915 * 916 * Called from function tipc_sendmsg(), which has done all sanity checks 917 * Returns the number of bytes sent on success, or errno 918 */ 919 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 920 int dlen, long timeout) 921 { 922 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 923 struct sock *sk = sock->sk; 924 struct tipc_sock *tsk = tipc_sk(sk); 925 struct list_head *cong_links = &tsk->cong_links; 926 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 927 struct tipc_group *grp = tsk->group; 928 struct tipc_msg *hdr = &tsk->phdr; 929 struct tipc_member *first = NULL; 930 struct tipc_member *mbr = NULL; 931 struct net *net = sock_net(sk); 932 u32 node, port, exclude; 933 struct list_head dsts; 934 u32 type, inst, scope; 935 int lookups = 0; 936 int dstcnt, rc; 937 bool cong; 938 939 INIT_LIST_HEAD(&dsts); 940 941 type = msg_nametype(hdr); 942 inst = dest->addr.name.name.instance; 943 scope = msg_lookup_scope(hdr); 944 exclude = tipc_group_exclude(grp); 945 946 while (++lookups < 4) { 947 first = NULL; 948 949 /* Look for a non-congested destination member, if any */ 950 while (1) { 951 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 952 &dstcnt, exclude, false)) 953 return -EHOSTUNREACH; 954 tipc_dest_pop(&dsts, &node, &port); 955 cong = tipc_group_cong(grp, node, port, blks, &mbr); 956 if (!cong) 957 break; 958 if (mbr == first) 959 break; 960 if (!first) 961 first = mbr; 962 } 963 964 /* Start over if destination was not in member list */ 965 if (unlikely(!mbr)) 966 continue; 967 968 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 969 break; 970 971 /* Block or return if destination link or member is congested */ 972 rc = tipc_wait_for_cond(sock, &timeout, 973 !tipc_dest_find(cong_links, node, 0) && 974 !tipc_group_cong(grp, node, port, 975 blks, &mbr)); 976 if (unlikely(rc)) 977 return rc; 978 979 /* Send, unless destination disappeared while waiting */ 980 if (likely(mbr)) 981 break; 982 } 983 984 if (unlikely(lookups >= 4)) 985 return -EHOSTUNREACH; 986 987 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 988 989 return rc ? 
rc : dlen; 990 } 991 992 /** 993 * tipc_send_group_bcast - send message to all members in communication group 994 * @sk: socket structure 995 * @m: message to send 996 * @dlen: total length of message data 997 * @timeout: timeout to wait for wakeup 998 * 999 * Called from function tipc_sendmsg(), which has done all sanity checks 1000 * Returns the number of bytes sent on success, or errno 1001 */ 1002 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 1003 int dlen, long timeout) 1004 { 1005 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1006 struct sock *sk = sock->sk; 1007 struct net *net = sock_net(sk); 1008 struct tipc_sock *tsk = tipc_sk(sk); 1009 struct tipc_group *grp = tsk->group; 1010 struct tipc_nlist *dsts = tipc_group_dests(grp); 1011 struct tipc_mc_method *method = &tsk->mc_method; 1012 bool ack = method->mandatory && method->rcast; 1013 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1014 struct tipc_msg *hdr = &tsk->phdr; 1015 int mtu = tipc_bcast_get_mtu(net); 1016 struct sk_buff_head pkts; 1017 int rc = -EHOSTUNREACH; 1018 1019 if (!dsts->local && !dsts->remote) 1020 return -EHOSTUNREACH; 1021 1022 /* Block or return if any destination link or member is congested */ 1023 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1024 !tipc_group_bc_cong(grp, blks)); 1025 if (unlikely(rc)) 1026 return rc; 1027 1028 /* Complete message header */ 1029 if (dest) { 1030 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1031 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1032 } else { 1033 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1034 msg_set_nameinst(hdr, 0); 1035 } 1036 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1037 msg_set_destport(hdr, 0); 1038 msg_set_destnode(hdr, 0); 1039 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1040 1041 /* Avoid getting stuck with repeated forced replicasts */ 1042 msg_set_grp_bc_ack_req(hdr, ack); 1043 1044 /* Build message as chain of buffers */ 1045 skb_queue_head_init(&pkts); 1046 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1047 if (unlikely(rc != dlen)) 1048 return rc; 1049 1050 /* Send message */ 1051 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1052 if (unlikely(rc)) 1053 return rc; 1054 1055 /* Update broadcast sequence number and send windows */ 1056 tipc_group_update_bc_members(tsk->group, blks, ack); 1057 1058 /* Broadcast link is now free to choose method for next broadcast */ 1059 method->mandatory = false; 1060 method->expires = jiffies; 1061 1062 return dlen; 1063 } 1064 1065 /** 1066 * tipc_send_group_mcast - send message to all members with given identity 1067 * @sock: socket structure 1068 * @m: message to send 1069 * @dlen: total length of message data 1070 * @timeout: timeout to wait for wakeup 1071 * 1072 * Called from function tipc_sendmsg(), which has done all sanity checks 1073 * Returns the number of bytes sent on success, or errno 1074 */ 1075 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1076 int dlen, long timeout) 1077 { 1078 struct sock *sk = sock->sk; 1079 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1080 struct tipc_sock *tsk = tipc_sk(sk); 1081 struct tipc_group *grp = tsk->group; 1082 struct tipc_msg *hdr = &tsk->phdr; 1083 struct net *net = sock_net(sk); 1084 u32 type, inst, scope, exclude; 1085 struct list_head dsts; 1086 u32 dstcnt; 1087 1088 INIT_LIST_HEAD(&dsts); 1089 1090 type = msg_nametype(hdr); 1091 inst = dest->addr.name.name.instance; 1092 scope = msg_lookup_scope(hdr); 1093 exclude = 
tipc_group_exclude(grp); 1094 1095 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 1096 &dstcnt, exclude, true)) 1097 return -EHOSTUNREACH; 1098 1099 if (dstcnt == 1) { 1100 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); 1101 return tipc_send_group_unicast(sock, m, dlen, timeout); 1102 } 1103 1104 tipc_dest_list_purge(&dsts); 1105 return tipc_send_group_bcast(sock, m, dlen, timeout); 1106 } 1107 1108 /** 1109 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 1110 * @arrvq: queue with arriving messages, to be cloned after destination lookup 1111 * @inputq: queue with cloned messages, delivered to socket after dest lookup 1112 * 1113 * Multi-threaded: parallel calls with reference to same queues may occur 1114 */ 1115 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 1116 struct sk_buff_head *inputq) 1117 { 1118 u32 self = tipc_own_addr(net); 1119 u32 type, lower, upper, scope; 1120 struct sk_buff *skb, *_skb; 1121 u32 portid, onode; 1122 struct sk_buff_head tmpq; 1123 struct list_head dports; 1124 struct tipc_msg *hdr; 1125 int user, mtyp, hlen; 1126 bool exact; 1127 1128 __skb_queue_head_init(&tmpq); 1129 INIT_LIST_HEAD(&dports); 1130 1131 skb = tipc_skb_peek(arrvq, &inputq->lock); 1132 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 1133 hdr = buf_msg(skb); 1134 user = msg_user(hdr); 1135 mtyp = msg_type(hdr); 1136 hlen = skb_headroom(skb) + msg_hdr_sz(hdr); 1137 onode = msg_orignode(hdr); 1138 type = msg_nametype(hdr); 1139 1140 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { 1141 spin_lock_bh(&inputq->lock); 1142 if (skb_peek(arrvq) == skb) { 1143 __skb_dequeue(arrvq); 1144 __skb_queue_tail(inputq, skb); 1145 } 1146 kfree_skb(skb); 1147 spin_unlock_bh(&inputq->lock); 1148 continue; 1149 } 1150 1151 /* Group messages require exact scope match */ 1152 if (msg_in_group(hdr)) { 1153 lower = 0; 1154 upper = ~0; 1155 scope = msg_lookup_scope(hdr); 1156 exact = true; 1157 } else { 1158 /* TIPC_NODE_SCOPE means "any scope" in this context */ 1159 if (onode == self) 1160 scope = TIPC_NODE_SCOPE; 1161 else 1162 scope = TIPC_CLUSTER_SCOPE; 1163 exact = false; 1164 lower = msg_namelower(hdr); 1165 upper = msg_nameupper(hdr); 1166 } 1167 1168 /* Create destination port list: */ 1169 tipc_nametbl_mc_lookup(net, type, lower, upper, 1170 scope, exact, &dports); 1171 1172 /* Clone message per destination */ 1173 while (tipc_dest_pop(&dports, NULL, &portid)) { 1174 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); 1175 if (_skb) { 1176 msg_set_destport(buf_msg(_skb), portid); 1177 __skb_queue_tail(&tmpq, _skb); 1178 continue; 1179 } 1180 pr_warn("Failed to clone mcast rcv buffer\n"); 1181 } 1182 /* Append to inputq if not already done by other thread */ 1183 spin_lock_bh(&inputq->lock); 1184 if (skb_peek(arrvq) == skb) { 1185 skb_queue_splice_tail_init(&tmpq, inputq); 1186 kfree_skb(__skb_dequeue(arrvq)); 1187 } 1188 spin_unlock_bh(&inputq->lock); 1189 __skb_queue_purge(&tmpq); 1190 kfree_skb(skb); 1191 } 1192 tipc_sk_rcv(net, inputq); 1193 } 1194 1195 /** 1196 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message 1197 * @tsk: receiving socket 1198 * @skb: pointer to message buffer. 
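 * @xmitq: queue used to collect any protocol reply (such as a
 *	   CONN_PROBE_REPLY) generated while processing @skb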
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
return -EPIPE; 1316 if (sk->sk_state != TIPC_OPEN) 1317 return -EISCONN; 1318 if (tsk->published) 1319 return -EOPNOTSUPP; 1320 if (dest->addrtype == TIPC_ADDR_NAME) { 1321 tsk->conn_type = dest->addr.name.name.type; 1322 tsk->conn_instance = dest->addr.name.name.instance; 1323 } 1324 msg_set_syn(hdr, 1); 1325 } 1326 1327 seq = &dest->addr.nameseq; 1328 if (dest->addrtype == TIPC_ADDR_MCAST) 1329 return tipc_sendmcast(sock, seq, m, dlen, timeout); 1330 1331 if (dest->addrtype == TIPC_ADDR_NAME) { 1332 type = dest->addr.name.name.type; 1333 inst = dest->addr.name.name.instance; 1334 dnode = dest->addr.name.domain; 1335 msg_set_type(hdr, TIPC_NAMED_MSG); 1336 msg_set_hdr_sz(hdr, NAMED_H_SIZE); 1337 msg_set_nametype(hdr, type); 1338 msg_set_nameinst(hdr, inst); 1339 msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); 1340 dport = tipc_nametbl_translate(net, type, inst, &dnode); 1341 msg_set_destnode(hdr, dnode); 1342 msg_set_destport(hdr, dport); 1343 if (unlikely(!dport && !dnode)) 1344 return -EHOSTUNREACH; 1345 } else if (dest->addrtype == TIPC_ADDR_ID) { 1346 dnode = dest->addr.id.node; 1347 msg_set_type(hdr, TIPC_DIRECT_MSG); 1348 msg_set_lookup_scope(hdr, 0); 1349 msg_set_destnode(hdr, dnode); 1350 msg_set_destport(hdr, dest->addr.id.ref); 1351 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1352 } else { 1353 return -EINVAL; 1354 } 1355 1356 /* Block or return if destination link is congested */ 1357 rc = tipc_wait_for_cond(sock, &timeout, 1358 !tipc_dest_find(clinks, dnode, 0)); 1359 if (unlikely(rc)) 1360 return rc; 1361 1362 skb_queue_head_init(&pkts); 1363 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1364 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1365 if (unlikely(rc != dlen)) 1366 return rc; 1367 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) 1368 return -ENOMEM; 1369 1370 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1371 if (unlikely(rc == -ELINKCONG)) { 1372 tipc_dest_push(clinks, dnode, 0); 1373 tsk->cong_link_cnt++; 1374 rc = 0; 1375 } 1376 1377 if (unlikely(syn && !rc)) 1378 tipc_set_sk_state(sk, TIPC_CONNECTING); 1379 1380 return rc ? rc : dlen; 1381 } 1382 1383 /** 1384 * tipc_sendstream - send stream-oriented data 1385 * @sock: socket structure 1386 * @m: data to send 1387 * @dsz: total length of data to be transmitted 1388 * 1389 * Used for SOCK_STREAM data. 
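 *
 * Data is transmitted in chunks of at most TIPC_MAX_USER_MSG_SIZE bytes;
 * each chunk is subject to both link level and connection level flow
 * control, so a call may legitimately return after sending only part of
 * the data (see __tipc_sendstream() below).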
1390 * 1391 * Returns the number of bytes sent on success (or partial success), 1392 * or errno if no data sent 1393 */ 1394 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) 1395 { 1396 struct sock *sk = sock->sk; 1397 int ret; 1398 1399 lock_sock(sk); 1400 ret = __tipc_sendstream(sock, m, dsz); 1401 release_sock(sk); 1402 1403 return ret; 1404 } 1405 1406 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) 1407 { 1408 struct sock *sk = sock->sk; 1409 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1410 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1411 struct tipc_sock *tsk = tipc_sk(sk); 1412 struct tipc_msg *hdr = &tsk->phdr; 1413 struct net *net = sock_net(sk); 1414 struct sk_buff_head pkts; 1415 u32 dnode = tsk_peer_node(tsk); 1416 int send, sent = 0; 1417 int rc = 0; 1418 1419 skb_queue_head_init(&pkts); 1420 1421 if (unlikely(dlen > INT_MAX)) 1422 return -EMSGSIZE; 1423 1424 /* Handle implicit connection setup */ 1425 if (unlikely(dest)) { 1426 rc = __tipc_sendmsg(sock, m, dlen); 1427 if (dlen && dlen == rc) { 1428 tsk->peer_caps = tipc_node_get_capabilities(net, dnode); 1429 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1430 } 1431 return rc; 1432 } 1433 1434 do { 1435 rc = tipc_wait_for_cond(sock, &timeout, 1436 (!tsk->cong_link_cnt && 1437 !tsk_conn_cong(tsk) && 1438 tipc_sk_connected(sk))); 1439 if (unlikely(rc)) 1440 break; 1441 1442 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); 1443 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); 1444 if (unlikely(rc != send)) 1445 break; 1446 1447 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1448 if (unlikely(rc == -ELINKCONG)) { 1449 tsk->cong_link_cnt = 1; 1450 rc = 0; 1451 } 1452 if (likely(!rc)) { 1453 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); 1454 sent += send; 1455 } 1456 } while (sent < dlen && !rc); 1457 1458 return sent ? sent : rc; 1459 } 1460 1461 /** 1462 * tipc_send_packet - send a connection-oriented message 1463 * @sock: socket structure 1464 * @m: message to send 1465 * @dsz: length of data to be transmitted 1466 * 1467 * Used for SOCK_SEQPACKET messages. 
1468 * 1469 * Returns the number of bytes sent on success, or errno otherwise 1470 */ 1471 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1472 { 1473 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1474 return -EMSGSIZE; 1475 1476 return tipc_sendstream(sock, m, dsz); 1477 } 1478 1479 /* tipc_sk_finish_conn - complete the setup of a connection 1480 */ 1481 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1482 u32 peer_node) 1483 { 1484 struct sock *sk = &tsk->sk; 1485 struct net *net = sock_net(sk); 1486 struct tipc_msg *msg = &tsk->phdr; 1487 1488 msg_set_syn(msg, 0); 1489 msg_set_destnode(msg, peer_node); 1490 msg_set_destport(msg, peer_port); 1491 msg_set_type(msg, TIPC_CONN_MSG); 1492 msg_set_lookup_scope(msg, 0); 1493 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1494 1495 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 1496 tipc_set_sk_state(sk, TIPC_ESTABLISHED); 1497 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1498 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1499 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1500 __skb_queue_purge(&sk->sk_write_queue); 1501 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1502 return; 1503 1504 /* Fall back to message based flow control */ 1505 tsk->rcv_win = FLOWCTL_MSG_WIN; 1506 tsk->snd_win = FLOWCTL_MSG_WIN; 1507 } 1508 1509 /** 1510 * tipc_sk_set_orig_addr - capture sender's address for received message 1511 * @m: descriptor for message info 1512 * @hdr: received message header 1513 * 1514 * Note: Address is not captured if not requested by receiver. 1515 */ 1516 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) 1517 { 1518 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name); 1519 struct tipc_msg *hdr = buf_msg(skb); 1520 1521 if (!srcaddr) 1522 return; 1523 1524 srcaddr->sock.family = AF_TIPC; 1525 srcaddr->sock.addrtype = TIPC_ADDR_ID; 1526 srcaddr->sock.scope = 0; 1527 srcaddr->sock.addr.id.ref = msg_origport(hdr); 1528 srcaddr->sock.addr.id.node = msg_orignode(hdr); 1529 srcaddr->sock.addr.name.domain = 0; 1530 m->msg_namelen = sizeof(struct sockaddr_tipc); 1531 1532 if (!msg_in_group(hdr)) 1533 return; 1534 1535 /* Group message users may also want to know sending member's id */ 1536 srcaddr->member.family = AF_TIPC; 1537 srcaddr->member.addrtype = TIPC_ADDR_NAME; 1538 srcaddr->member.scope = 0; 1539 srcaddr->member.addr.name.name.type = msg_nametype(hdr); 1540 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; 1541 srcaddr->member.addr.name.domain = 0; 1542 m->msg_namelen = sizeof(*srcaddr); 1543 } 1544 1545 /** 1546 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1547 * @m: descriptor for message info 1548 * @msg: received message header 1549 * @tsk: TIPC port associated with message 1550 * 1551 * Note: Ancillary data is not captured if not requested by receiver. 1552 * 1553 * Returns 0 if successful, otherwise errno 1554 */ 1555 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1556 struct tipc_sock *tsk) 1557 { 1558 u32 anc_data[3]; 1559 u32 err; 1560 u32 dest_type; 1561 int has_name; 1562 int res; 1563 1564 if (likely(m->msg_controllen == 0)) 1565 return 0; 1566 1567 /* Optionally capture errored message object(s) */ 1568 err = msg ? 
msg_errcode(msg) : 0; 1569 if (unlikely(err)) { 1570 anc_data[0] = err; 1571 anc_data[1] = msg_data_sz(msg); 1572 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); 1573 if (res) 1574 return res; 1575 if (anc_data[1]) { 1576 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1577 msg_data(msg)); 1578 if (res) 1579 return res; 1580 } 1581 } 1582 1583 /* Optionally capture message destination object */ 1584 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 1585 switch (dest_type) { 1586 case TIPC_NAMED_MSG: 1587 has_name = 1; 1588 anc_data[0] = msg_nametype(msg); 1589 anc_data[1] = msg_namelower(msg); 1590 anc_data[2] = msg_namelower(msg); 1591 break; 1592 case TIPC_MCAST_MSG: 1593 has_name = 1; 1594 anc_data[0] = msg_nametype(msg); 1595 anc_data[1] = msg_namelower(msg); 1596 anc_data[2] = msg_nameupper(msg); 1597 break; 1598 case TIPC_CONN_MSG: 1599 has_name = (tsk->conn_type != 0); 1600 anc_data[0] = tsk->conn_type; 1601 anc_data[1] = tsk->conn_instance; 1602 anc_data[2] = tsk->conn_instance; 1603 break; 1604 default: 1605 has_name = 0; 1606 } 1607 if (has_name) { 1608 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); 1609 if (res) 1610 return res; 1611 } 1612 1613 return 0; 1614 } 1615 1616 static void tipc_sk_send_ack(struct tipc_sock *tsk) 1617 { 1618 struct sock *sk = &tsk->sk; 1619 struct net *net = sock_net(sk); 1620 struct sk_buff *skb = NULL; 1621 struct tipc_msg *msg; 1622 u32 peer_port = tsk_peer_port(tsk); 1623 u32 dnode = tsk_peer_node(tsk); 1624 1625 if (!tipc_sk_connected(sk)) 1626 return; 1627 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, 1628 dnode, tsk_own_node(tsk), peer_port, 1629 tsk->portid, TIPC_OK); 1630 if (!skb) 1631 return; 1632 msg = buf_msg(skb); 1633 msg_set_conn_ack(msg, tsk->rcv_unacked); 1634 tsk->rcv_unacked = 0; 1635 1636 /* Adjust to and advertize the correct window limit */ 1637 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) { 1638 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf); 1639 msg_set_adv_win(msg, tsk->rcv_win); 1640 } 1641 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1642 } 1643 1644 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1645 { 1646 struct sock *sk = sock->sk; 1647 DEFINE_WAIT(wait); 1648 long timeo = *timeop; 1649 int err = sock_error(sk); 1650 1651 if (err) 1652 return err; 1653 1654 for (;;) { 1655 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1656 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1657 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1658 err = -ENOTCONN; 1659 break; 1660 } 1661 release_sock(sk); 1662 timeo = schedule_timeout(timeo); 1663 lock_sock(sk); 1664 } 1665 err = 0; 1666 if (!skb_queue_empty(&sk->sk_receive_queue)) 1667 break; 1668 err = -EAGAIN; 1669 if (!timeo) 1670 break; 1671 err = sock_intr_errno(timeo); 1672 if (signal_pending(current)) 1673 break; 1674 1675 err = sock_error(sk); 1676 if (err) 1677 break; 1678 } 1679 finish_wait(sk_sleep(sk), &wait); 1680 *timeop = timeo; 1681 return err; 1682 } 1683 1684 /** 1685 * tipc_recvmsg - receive packet-oriented message 1686 * @m: descriptor for message info 1687 * @buflen: length of user buffer area 1688 * @flags: receive flags 1689 * 1690 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 1691 * If the complete message doesn't fit in user area, truncate it. 
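 *
 * A hypothetical user space sketch (not part of this file) of receiving one
 * datagram together with its TIPC_DESTNAME ancillary data:
 *
 *	char buf[1024], cbuf[CMSG_SPACE(12)];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *
 *	recvmsg(sd, &m, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC && c->cmsg_type == TIPC_DESTNAME)
 *			handle_destname(CMSG_DATA(c));	- hypothetical helper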
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}

/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
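 *
 * Copying continues while more data is queued or while fewer than
 * sock_rcvlowat() bytes (all of @buflen under MSG_WAITALL) have been
 * copied; with MSG_PEEK no more than the first queued message is examined.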
1799 * 1800 * Returns size of returned message data, errno otherwise 1801 */ 1802 static int tipc_recvstream(struct socket *sock, struct msghdr *m, 1803 size_t buflen, int flags) 1804 { 1805 struct sock *sk = sock->sk; 1806 struct tipc_sock *tsk = tipc_sk(sk); 1807 struct sk_buff *skb; 1808 struct tipc_msg *hdr; 1809 struct tipc_skb_cb *skb_cb; 1810 bool peek = flags & MSG_PEEK; 1811 int offset, required, copy, copied = 0; 1812 int hlen, dlen, err, rc; 1813 long timeout; 1814 1815 /* Catch invalid receive attempts */ 1816 if (unlikely(!buflen)) 1817 return -EINVAL; 1818 1819 lock_sock(sk); 1820 1821 if (unlikely(sk->sk_state == TIPC_OPEN)) { 1822 rc = -ENOTCONN; 1823 goto exit; 1824 } 1825 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); 1826 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1827 1828 do { 1829 /* Look at first msg in receive queue; wait if necessary */ 1830 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1831 if (unlikely(rc)) 1832 break; 1833 skb = skb_peek(&sk->sk_receive_queue); 1834 skb_cb = TIPC_SKB_CB(skb); 1835 hdr = buf_msg(skb); 1836 dlen = msg_data_sz(hdr); 1837 hlen = msg_hdr_sz(hdr); 1838 err = msg_errcode(hdr); 1839 1840 /* Discard any empty non-errored (SYN-) message */ 1841 if (unlikely(!dlen && !err)) { 1842 tsk_advance_rx_queue(sk); 1843 continue; 1844 } 1845 1846 /* Collect msg meta data, incl. error code and rejected data */ 1847 if (!copied) { 1848 tipc_sk_set_orig_addr(m, skb); 1849 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1850 if (rc) 1851 break; 1852 } 1853 1854 /* Copy data if msg ok, otherwise return error/partial data */ 1855 if (likely(!err)) { 1856 offset = skb_cb->bytes_read; 1857 copy = min_t(int, dlen - offset, buflen - copied); 1858 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); 1859 if (unlikely(rc)) 1860 break; 1861 copied += copy; 1862 offset += copy; 1863 if (unlikely(offset < dlen)) { 1864 if (!peek) 1865 skb_cb->bytes_read = offset; 1866 break; 1867 } 1868 } else { 1869 rc = 0; 1870 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) 1871 rc = -ECONNRESET; 1872 if (copied || rc) 1873 break; 1874 } 1875 1876 if (unlikely(peek)) 1877 break; 1878 1879 tsk_advance_rx_queue(sk); 1880 1881 /* Send connection flow control advertisement when applicable */ 1882 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1883 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) 1884 tipc_sk_send_ack(tsk); 1885 1886 /* Exit if all requested data or FIN/error received */ 1887 if (copied == buflen || err) 1888 break; 1889 1890 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); 1891 exit: 1892 release_sock(sk); 1893 return copied ? 
copied : rc;
}

/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

/**
 * tipc_sk_filter_connect - check incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer.
1974 * Returns true if message should be added to receive queue, false otherwise 1975 */ 1976 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1977 { 1978 struct sock *sk = &tsk->sk; 1979 struct net *net = sock_net(sk); 1980 struct tipc_msg *hdr = buf_msg(skb); 1981 bool con_msg = msg_connected(hdr); 1982 u32 pport = tsk_peer_port(tsk); 1983 u32 pnode = tsk_peer_node(tsk); 1984 u32 oport = msg_origport(hdr); 1985 u32 onode = msg_orignode(hdr); 1986 int err = msg_errcode(hdr); 1987 unsigned long delay; 1988 1989 if (unlikely(msg_mcast(hdr))) 1990 return false; 1991 1992 switch (sk->sk_state) { 1993 case TIPC_CONNECTING: 1994 /* Setup ACK */ 1995 if (likely(con_msg)) { 1996 if (err) 1997 break; 1998 tipc_sk_finish_conn(tsk, oport, onode); 1999 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2000 /* ACK+ message with data is added to receive queue */ 2001 if (msg_data_sz(hdr)) 2002 return true; 2003 /* Empty ACK-, - wake up sleeping connect() and drop */ 2004 sk->sk_data_ready(sk); 2005 msg_set_dest_droppable(hdr, 1); 2006 return false; 2007 } 2008 /* Ignore connectionless message if not from listening socket */ 2009 if (oport != pport || onode != pnode) 2010 return false; 2011 2012 /* Rejected SYN */ 2013 if (err != TIPC_ERR_OVERLOAD) 2014 break; 2015 2016 /* Prepare for new setup attempt if we have a SYN clone */ 2017 if (skb_queue_empty(&sk->sk_write_queue)) 2018 break; 2019 get_random_bytes(&delay, 2); 2020 delay %= (tsk->conn_timeout / 4); 2021 delay = msecs_to_jiffies(delay + 100); 2022 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay); 2023 return false; 2024 case TIPC_OPEN: 2025 case TIPC_DISCONNECTING: 2026 return false; 2027 case TIPC_LISTEN: 2028 /* Accept only SYN message */ 2029 if (!msg_is_syn(hdr) && 2030 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT) 2031 return false; 2032 if (!con_msg && !err) 2033 return true; 2034 return false; 2035 case TIPC_ESTABLISHED: 2036 /* Accept only connection-based messages sent by peer */ 2037 if (likely(con_msg && !err && pport == oport && pnode == onode)) 2038 return true; 2039 if (!tsk_peer_msg(tsk, hdr)) 2040 return false; 2041 if (!err) 2042 return true; 2043 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2044 tipc_node_remove_conn(net, pnode, tsk->portid); 2045 sk->sk_state_change(sk); 2046 return true; 2047 default: 2048 pr_err("Unknown sk_state %u\n", sk->sk_state); 2049 } 2050 /* Abort connection setup attempt */ 2051 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2052 sk->sk_err = ECONNREFUSED; 2053 sk->sk_state_change(sk); 2054 return true; 2055 } 2056 2057 /** 2058 * rcvbuf_limit - get proper overload limit of socket receive queue 2059 * @sk: socket 2060 * @skb: message 2061 * 2062 * For connection oriented messages, irrespective of importance, 2063 * default queue limit is 2 MB. 
2064 * 2065 * For connectionless messages, queue limits are based on message 2066 * importance as follows: 2067 * 2068 * TIPC_LOW_IMPORTANCE (2 MB) 2069 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2070 * TIPC_HIGH_IMPORTANCE (8 MB) 2071 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2072 * 2073 * Returns overload limit according to corresponding message importance 2074 */ 2075 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2076 { 2077 struct tipc_sock *tsk = tipc_sk(sk); 2078 struct tipc_msg *hdr = buf_msg(skb); 2079 2080 if (unlikely(msg_in_group(hdr))) 2081 return sk->sk_rcvbuf; 2082 2083 if (unlikely(!msg_connected(hdr))) 2084 return sk->sk_rcvbuf << msg_importance(hdr); 2085 2086 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2087 return sk->sk_rcvbuf; 2088 2089 return FLOWCTL_MSG_LIM; 2090 } 2091 2092 /** 2093 * tipc_sk_filter_rcv - validate incoming message 2094 * @sk: socket 2095 * @skb: pointer to message. 2096 * 2097 * Enqueues message on receive queue if acceptable; optionally handles 2098 * disconnect indication for a connected socket. 2099 * 2100 * Called with socket lock already taken 2101 * 2102 */ 2103 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2104 struct sk_buff_head *xmitq) 2105 { 2106 bool sk_conn = !tipc_sk_type_connectionless(sk); 2107 struct tipc_sock *tsk = tipc_sk(sk); 2108 struct tipc_group *grp = tsk->group; 2109 struct tipc_msg *hdr = buf_msg(skb); 2110 struct net *net = sock_net(sk); 2111 struct sk_buff_head inputq; 2112 int limit, err = TIPC_OK; 2113 2114 TIPC_SKB_CB(skb)->bytes_read = 0; 2115 __skb_queue_head_init(&inputq); 2116 __skb_queue_tail(&inputq, skb); 2117 2118 if (unlikely(!msg_isdata(hdr))) 2119 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2120 2121 if (unlikely(grp)) 2122 tipc_group_filter_msg(grp, &inputq, xmitq); 2123 2124 /* Validate and add to receive buffer if there is space */ 2125 while ((skb = __skb_dequeue(&inputq))) { 2126 hdr = buf_msg(skb); 2127 limit = rcvbuf_limit(sk, skb); 2128 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2129 (!sk_conn && msg_connected(hdr)) || 2130 (!grp && msg_in_group(hdr))) 2131 err = TIPC_ERR_NO_PORT; 2132 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2133 atomic_inc(&sk->sk_drops); 2134 err = TIPC_ERR_OVERLOAD; 2135 } 2136 2137 if (unlikely(err)) { 2138 tipc_skb_reject(net, err, skb, xmitq); 2139 err = TIPC_OK; 2140 continue; 2141 } 2142 __skb_queue_tail(&sk->sk_receive_queue, skb); 2143 skb_set_owner_r(skb, sk); 2144 sk->sk_data_ready(sk); 2145 } 2146 } 2147 2148 /** 2149 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2150 * @sk: socket 2151 * @skb: message 2152 * 2153 * Caller must hold socket lock 2154 */ 2155 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2156 { 2157 unsigned int before = sk_rmem_alloc_get(sk); 2158 struct sk_buff_head xmitq; 2159 unsigned int added; 2160 2161 __skb_queue_head_init(&xmitq); 2162 2163 tipc_sk_filter_rcv(sk, skb, &xmitq); 2164 added = sk_rmem_alloc_get(sk) - before; 2165 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2166 2167 /* Send pending response/rejected messages, if any */ 2168 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2169 return 0; 2170 } 2171 2172 /** 2173 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2174 * inputq and try adding them to socket or backlog queue 2175 * @inputq: list of incoming buffers with potentially different destinations 2176 * @sk: socket where the buffers should be enqueued 2177 * @dport: port number for the socket 2178 * 2179 * 
Caller must hold socket lock 2180 */ 2181 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2182 u32 dport, struct sk_buff_head *xmitq) 2183 { 2184 unsigned long time_limit = jiffies + 2; 2185 struct sk_buff *skb; 2186 unsigned int lim; 2187 atomic_t *dcnt; 2188 u32 onode; 2189 2190 while (skb_queue_len(inputq)) { 2191 if (unlikely(time_after_eq(jiffies, time_limit))) 2192 return; 2193 2194 skb = tipc_skb_dequeue(inputq, dport); 2195 if (unlikely(!skb)) 2196 return; 2197 2198 /* Add message directly to receive queue if possible */ 2199 if (!sock_owned_by_user(sk)) { 2200 tipc_sk_filter_rcv(sk, skb, xmitq); 2201 continue; 2202 } 2203 2204 /* Try backlog, compensating for double-counted bytes */ 2205 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2206 if (!sk->sk_backlog.len) 2207 atomic_set(dcnt, 0); 2208 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2209 if (likely(!sk_add_backlog(sk, skb, lim))) 2210 continue; 2211 2212 /* Overload => reject message back to sender */ 2213 onode = tipc_own_addr(sock_net(sk)); 2214 atomic_inc(&sk->sk_drops); 2215 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2216 __skb_queue_tail(xmitq, skb); 2217 break; 2218 } 2219 } 2220 2221 /** 2222 * tipc_sk_rcv - handle a chain of incoming buffers 2223 * @inputq: buffer list containing the buffers 2224 * Consumes all buffers in list until inputq is empty 2225 * Note: may be called in multiple threads referring to the same queue 2226 */ 2227 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2228 { 2229 struct sk_buff_head xmitq; 2230 u32 dnode, dport = 0; 2231 int err; 2232 struct tipc_sock *tsk; 2233 struct sock *sk; 2234 struct sk_buff *skb; 2235 2236 __skb_queue_head_init(&xmitq); 2237 while (skb_queue_len(inputq)) { 2238 dport = tipc_skb_peek_port(inputq, dport); 2239 tsk = tipc_sk_lookup(net, dport); 2240 2241 if (likely(tsk)) { 2242 sk = &tsk->sk; 2243 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2244 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2245 spin_unlock_bh(&sk->sk_lock.slock); 2246 } 2247 /* Send pending response/rejected messages, if any */ 2248 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2249 sock_put(sk); 2250 continue; 2251 } 2252 /* No destination socket => dequeue skb if still there */ 2253 skb = tipc_skb_dequeue(inputq, dport); 2254 if (!skb) 2255 return; 2256 2257 /* Try secondary lookup if unresolved named message */ 2258 err = TIPC_ERR_NO_PORT; 2259 if (tipc_msg_lookup_dest(net, skb, &err)) 2260 goto xmit; 2261 2262 /* Prepare for message rejection */ 2263 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2264 continue; 2265 xmit: 2266 dnode = msg_destnode(buf_msg(skb)); 2267 tipc_node_xmit_skb(net, skb, dnode, dport); 2268 } 2269 } 2270 2271 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2272 { 2273 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2274 struct sock *sk = sock->sk; 2275 int done; 2276 2277 do { 2278 int err = sock_error(sk); 2279 if (err) 2280 return err; 2281 if (!*timeo_p) 2282 return -ETIMEDOUT; 2283 if (signal_pending(current)) 2284 return sock_intr_errno(*timeo_p); 2285 2286 add_wait_queue(sk_sleep(sk), &wait); 2287 done = sk_wait_event(sk, timeo_p, 2288 sk->sk_state != TIPC_CONNECTING, &wait); 2289 remove_wait_queue(sk_sleep(sk), &wait); 2290 } while (!done); 2291 return 0; 2292 } 2293 2294 /** 2295 * tipc_connect - establish a connection to another TIPC port 2296 * @sock: socket structure 2297 * @dest: socket address for destination port 2298 * @destlen: size of socket address data structure 2299 * @flags: 
file-related flags associated with socket 2300 * 2301 * Returns 0 on success, errno otherwise 2302 */ 2303 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2304 int destlen, int flags) 2305 { 2306 struct sock *sk = sock->sk; 2307 struct tipc_sock *tsk = tipc_sk(sk); 2308 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2309 struct msghdr m = {NULL,}; 2310 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; 2311 int previous; 2312 int res = 0; 2313 2314 if (destlen != sizeof(struct sockaddr_tipc)) 2315 return -EINVAL; 2316 2317 lock_sock(sk); 2318 2319 if (tsk->group) { 2320 res = -EINVAL; 2321 goto exit; 2322 } 2323 2324 if (dst->family == AF_UNSPEC) { 2325 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2326 if (!tipc_sk_type_connectionless(sk)) 2327 res = -EINVAL; 2328 goto exit; 2329 } else if (dst->family != AF_TIPC) { 2330 res = -EINVAL; 2331 } 2332 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2333 res = -EINVAL; 2334 if (res) 2335 goto exit; 2336 2337 /* DGRAM/RDM connect(), just save the destaddr */ 2338 if (tipc_sk_type_connectionless(sk)) { 2339 memcpy(&tsk->peer, dest, destlen); 2340 goto exit; 2341 } 2342 2343 previous = sk->sk_state; 2344 2345 switch (sk->sk_state) { 2346 case TIPC_OPEN: 2347 /* Send a 'SYN-' to destination */ 2348 m.msg_name = dest; 2349 m.msg_namelen = destlen; 2350 2351 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2352 * indicate send_msg() is never blocked. 2353 */ 2354 if (!timeout) 2355 m.msg_flags = MSG_DONTWAIT; 2356 2357 res = __tipc_sendmsg(sock, &m, 0); 2358 if ((res < 0) && (res != -EWOULDBLOCK)) 2359 goto exit; 2360 2361 /* Just entered TIPC_CONNECTING state; the only 2362 * difference is that return value in non-blocking 2363 * case is EINPROGRESS, rather than EALREADY. 2364 */ 2365 res = -EINPROGRESS; 2366 /* fall thru' */ 2367 case TIPC_CONNECTING: 2368 if (!timeout) { 2369 if (previous == TIPC_CONNECTING) 2370 res = -EALREADY; 2371 goto exit; 2372 } 2373 timeout = msecs_to_jiffies(timeout); 2374 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2375 res = tipc_wait_for_connect(sock, &timeout); 2376 break; 2377 case TIPC_ESTABLISHED: 2378 res = -EISCONN; 2379 break; 2380 default: 2381 res = -EINVAL; 2382 } 2383 2384 exit: 2385 release_sock(sk); 2386 return res; 2387 } 2388 2389 /** 2390 * tipc_listen - allow socket to listen for incoming connections 2391 * @sock: socket structure 2392 * @len: (unused) 2393 * 2394 * Returns 0 on success, errno otherwise 2395 */ 2396 static int tipc_listen(struct socket *sock, int len) 2397 { 2398 struct sock *sk = sock->sk; 2399 int res; 2400 2401 lock_sock(sk); 2402 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2403 release_sock(sk); 2404 2405 return res; 2406 } 2407 2408 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2409 { 2410 struct sock *sk = sock->sk; 2411 DEFINE_WAIT(wait); 2412 int err; 2413 2414 /* True wake-one mechanism for incoming connections: only 2415 * one process gets woken up, not the 'whole herd'. 2416 * Since we do not 'race & poll' for established sockets 2417 * anymore, the common case will execute the loop only once. 
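 *
 * Purely illustrative user-space counterpart of this accept path (not part
 * of this file; "srv" and "cli" are hypothetical AF_TIPC SOCK_STREAM
 * descriptors, the service type value is made up):
 *
 *	listen(srv, 0);
 *	int conn = accept(srv, NULL, NULL);
 *
 * while the connecting side typically does:
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name.type = 4711,
 *		.addr.name.name.instance = 1,
 *		.addr.name.domain = 0,
 *	};
 *	connect(cli, (struct sockaddr *)&addr, sizeof(addr));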
 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 * @kern: true if the socket is being created by the kernel
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
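 *
 * Illustrative user-space call (not part of this file); as the check
 * below shows, only SHUT_RDWR is accepted for TIPC sockets:
 *
 *	shutdown(fd, SHUT_RDWR);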
2525 * 2526 * Returns 0 on success, errno otherwise 2527 */ 2528 static int tipc_shutdown(struct socket *sock, int how) 2529 { 2530 struct sock *sk = sock->sk; 2531 int res; 2532 2533 if (how != SHUT_RDWR) 2534 return -EINVAL; 2535 2536 lock_sock(sk); 2537 2538 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2539 sk->sk_shutdown = SEND_SHUTDOWN; 2540 2541 if (sk->sk_state == TIPC_DISCONNECTING) { 2542 /* Discard any unreceived messages */ 2543 __skb_queue_purge(&sk->sk_receive_queue); 2544 2545 /* Wake up anyone sleeping in poll */ 2546 sk->sk_state_change(sk); 2547 res = 0; 2548 } else { 2549 res = -ENOTCONN; 2550 } 2551 2552 release_sock(sk); 2553 return res; 2554 } 2555 2556 static void tipc_sk_check_probing_state(struct sock *sk, 2557 struct sk_buff_head *list) 2558 { 2559 struct tipc_sock *tsk = tipc_sk(sk); 2560 u32 pnode = tsk_peer_node(tsk); 2561 u32 pport = tsk_peer_port(tsk); 2562 u32 self = tsk_own_node(tsk); 2563 u32 oport = tsk->portid; 2564 struct sk_buff *skb; 2565 2566 if (tsk->probe_unacked) { 2567 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2568 sk->sk_err = ECONNABORTED; 2569 tipc_node_remove_conn(sock_net(sk), pnode, pport); 2570 sk->sk_state_change(sk); 2571 return; 2572 } 2573 /* Prepare new probe */ 2574 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2575 pnode, self, pport, oport, TIPC_OK); 2576 if (skb) 2577 __skb_queue_tail(list, skb); 2578 tsk->probe_unacked = true; 2579 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2580 } 2581 2582 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) 2583 { 2584 struct tipc_sock *tsk = tipc_sk(sk); 2585 2586 /* Try again later if dest link is congested */ 2587 if (tsk->cong_link_cnt) { 2588 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100)); 2589 return; 2590 } 2591 /* Prepare SYN for retransmit */ 2592 tipc_msg_skb_clone(&sk->sk_write_queue, list); 2593 } 2594 2595 static void tipc_sk_timeout(struct timer_list *t) 2596 { 2597 struct sock *sk = from_timer(sk, t, sk_timer); 2598 struct tipc_sock *tsk = tipc_sk(sk); 2599 u32 pnode = tsk_peer_node(tsk); 2600 struct sk_buff_head list; 2601 int rc = 0; 2602 2603 skb_queue_head_init(&list); 2604 bh_lock_sock(sk); 2605 2606 /* Try again later if socket is busy */ 2607 if (sock_owned_by_user(sk)) { 2608 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2609 bh_unlock_sock(sk); 2610 return; 2611 } 2612 2613 if (sk->sk_state == TIPC_ESTABLISHED) 2614 tipc_sk_check_probing_state(sk, &list); 2615 else if (sk->sk_state == TIPC_CONNECTING) 2616 tipc_sk_retry_connect(sk, &list); 2617 2618 bh_unlock_sock(sk); 2619 2620 if (!skb_queue_empty(&list)) 2621 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid); 2622 2623 /* SYN messages may cause link congestion */ 2624 if (rc == -ELINKCONG) { 2625 tipc_dest_push(&tsk->cong_links, pnode, 0); 2626 tsk->cong_link_cnt = 1; 2627 } 2628 sock_put(sk); 2629 } 2630 2631 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2632 struct tipc_name_seq const *seq) 2633 { 2634 struct sock *sk = &tsk->sk; 2635 struct net *net = sock_net(sk); 2636 struct publication *publ; 2637 u32 key; 2638 2639 if (scope != TIPC_NODE_SCOPE) 2640 scope = TIPC_CLUSTER_SCOPE; 2641 2642 if (tipc_sk_connected(sk)) 2643 return -EINVAL; 2644 key = tsk->portid + tsk->pub_count + 1; 2645 if (key == tsk->portid) 2646 return -EADDRINUSE; 2647 2648 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2649 scope, tsk->portid, key); 2650 if (unlikely(!publ)) 2651 return -EINVAL; 2652 2653 
list_add(&publ->binding_sock, &tsk->publications); 2654 tsk->pub_count++; 2655 tsk->published = 1; 2656 return 0; 2657 } 2658 2659 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2660 struct tipc_name_seq const *seq) 2661 { 2662 struct net *net = sock_net(&tsk->sk); 2663 struct publication *publ; 2664 struct publication *safe; 2665 int rc = -EINVAL; 2666 2667 if (scope != TIPC_NODE_SCOPE) 2668 scope = TIPC_CLUSTER_SCOPE; 2669 2670 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2671 if (seq) { 2672 if (publ->scope != scope) 2673 continue; 2674 if (publ->type != seq->type) 2675 continue; 2676 if (publ->lower != seq->lower) 2677 continue; 2678 if (publ->upper != seq->upper) 2679 break; 2680 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2681 publ->upper, publ->key); 2682 rc = 0; 2683 break; 2684 } 2685 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2686 publ->upper, publ->key); 2687 rc = 0; 2688 } 2689 if (list_empty(&tsk->publications)) 2690 tsk->published = 0; 2691 return rc; 2692 } 2693 2694 /* tipc_sk_reinit: set non-zero address in all existing sockets 2695 * when we go from standalone to network mode. 2696 */ 2697 void tipc_sk_reinit(struct net *net) 2698 { 2699 struct tipc_net *tn = net_generic(net, tipc_net_id); 2700 struct rhashtable_iter iter; 2701 struct tipc_sock *tsk; 2702 struct tipc_msg *msg; 2703 2704 rhashtable_walk_enter(&tn->sk_rht, &iter); 2705 2706 do { 2707 rhashtable_walk_start(&iter); 2708 2709 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2710 spin_lock_bh(&tsk->sk.sk_lock.slock); 2711 msg = &tsk->phdr; 2712 msg_set_prevnode(msg, tipc_own_addr(net)); 2713 msg_set_orignode(msg, tipc_own_addr(net)); 2714 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2715 } 2716 2717 rhashtable_walk_stop(&iter); 2718 } while (tsk == ERR_PTR(-EAGAIN)); 2719 2720 rhashtable_walk_exit(&iter); 2721 } 2722 2723 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2724 { 2725 struct tipc_net *tn = net_generic(net, tipc_net_id); 2726 struct tipc_sock *tsk; 2727 2728 rcu_read_lock(); 2729 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2730 if (tsk) 2731 sock_hold(&tsk->sk); 2732 rcu_read_unlock(); 2733 2734 return tsk; 2735 } 2736 2737 static int tipc_sk_insert(struct tipc_sock *tsk) 2738 { 2739 struct sock *sk = &tsk->sk; 2740 struct net *net = sock_net(sk); 2741 struct tipc_net *tn = net_generic(net, tipc_net_id); 2742 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2743 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2744 2745 while (remaining--) { 2746 portid++; 2747 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2748 portid = TIPC_MIN_PORT; 2749 tsk->portid = portid; 2750 sock_hold(&tsk->sk); 2751 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2752 tsk_rht_params)) 2753 return 0; 2754 sock_put(&tsk->sk); 2755 } 2756 2757 return -1; 2758 } 2759 2760 static void tipc_sk_remove(struct tipc_sock *tsk) 2761 { 2762 struct sock *sk = &tsk->sk; 2763 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2764 2765 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2766 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2767 __sock_put(sk); 2768 } 2769 } 2770 2771 static const struct rhashtable_params tsk_rht_params = { 2772 .nelem_hint = 192, 2773 .head_offset = offsetof(struct tipc_sock, node), 2774 .key_offset = offsetof(struct tipc_sock, portid), 2775 .key_len = sizeof(u32), /* portid */ 2776 .max_size = 1048576, 2777 .min_size = 256, 2778 
.automatic_shrinking = true, 2779 }; 2780 2781 int tipc_sk_rht_init(struct net *net) 2782 { 2783 struct tipc_net *tn = net_generic(net, tipc_net_id); 2784 2785 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2786 } 2787 2788 void tipc_sk_rht_destroy(struct net *net) 2789 { 2790 struct tipc_net *tn = net_generic(net, tipc_net_id); 2791 2792 /* Wait for socket readers to complete */ 2793 synchronize_net(); 2794 2795 rhashtable_destroy(&tn->sk_rht); 2796 } 2797 2798 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2799 { 2800 struct net *net = sock_net(&tsk->sk); 2801 struct tipc_group *grp = tsk->group; 2802 struct tipc_msg *hdr = &tsk->phdr; 2803 struct tipc_name_seq seq; 2804 int rc; 2805 2806 if (mreq->type < TIPC_RESERVED_TYPES) 2807 return -EACCES; 2808 if (mreq->scope > TIPC_NODE_SCOPE) 2809 return -EINVAL; 2810 if (grp) 2811 return -EACCES; 2812 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2813 if (!grp) 2814 return -ENOMEM; 2815 tsk->group = grp; 2816 msg_set_lookup_scope(hdr, mreq->scope); 2817 msg_set_nametype(hdr, mreq->type); 2818 msg_set_dest_droppable(hdr, true); 2819 seq.type = mreq->type; 2820 seq.lower = mreq->instance; 2821 seq.upper = seq.lower; 2822 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2823 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2824 if (rc) { 2825 tipc_group_delete(net, grp); 2826 tsk->group = NULL; 2827 return rc; 2828 } 2829 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2830 tsk->mc_method.rcast = true; 2831 tsk->mc_method.mandatory = true; 2832 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2833 return rc; 2834 } 2835 2836 static int tipc_sk_leave(struct tipc_sock *tsk) 2837 { 2838 struct net *net = sock_net(&tsk->sk); 2839 struct tipc_group *grp = tsk->group; 2840 struct tipc_name_seq seq; 2841 int scope; 2842 2843 if (!grp) 2844 return -EINVAL; 2845 tipc_group_self(grp, &seq, &scope); 2846 tipc_group_delete(net, grp); 2847 tsk->group = NULL; 2848 tipc_sk_withdraw(tsk, scope, &seq); 2849 return 0; 2850 } 2851 2852 /** 2853 * tipc_setsockopt - set socket option 2854 * @sock: socket structure 2855 * @lvl: option level 2856 * @opt: option identifier 2857 * @ov: pointer to new option value 2858 * @ol: length of option value 2859 * 2860 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2861 * (to ease compatibility). 
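 *
 * Purely illustrative user-space sketch of the group join/leave options
 * handled below (not part of this file; assumes <linux/tipc.h>, a
 * hypothetical AF_TIPC SOCK_RDM descriptor "fd", and a made-up service
 * type that must be at or above TIPC_RESERVED_TYPES):
 *
 *	struct tipc_group_req mreq = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.flags = TIPC_GROUP_MEMBER_EVTS,
 *	};
 *	setsockopt(fd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);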
2862 * 2863 * Returns 0 on success, errno otherwise 2864 */ 2865 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2866 char __user *ov, unsigned int ol) 2867 { 2868 struct sock *sk = sock->sk; 2869 struct tipc_sock *tsk = tipc_sk(sk); 2870 struct tipc_group_req mreq; 2871 u32 value = 0; 2872 int res = 0; 2873 2874 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2875 return 0; 2876 if (lvl != SOL_TIPC) 2877 return -ENOPROTOOPT; 2878 2879 switch (opt) { 2880 case TIPC_IMPORTANCE: 2881 case TIPC_SRC_DROPPABLE: 2882 case TIPC_DEST_DROPPABLE: 2883 case TIPC_CONN_TIMEOUT: 2884 if (ol < sizeof(value)) 2885 return -EINVAL; 2886 if (get_user(value, (u32 __user *)ov)) 2887 return -EFAULT; 2888 break; 2889 case TIPC_GROUP_JOIN: 2890 if (ol < sizeof(mreq)) 2891 return -EINVAL; 2892 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2893 return -EFAULT; 2894 break; 2895 default: 2896 if (ov || ol) 2897 return -EINVAL; 2898 } 2899 2900 lock_sock(sk); 2901 2902 switch (opt) { 2903 case TIPC_IMPORTANCE: 2904 res = tsk_set_importance(tsk, value); 2905 break; 2906 case TIPC_SRC_DROPPABLE: 2907 if (sock->type != SOCK_STREAM) 2908 tsk_set_unreliable(tsk, value); 2909 else 2910 res = -ENOPROTOOPT; 2911 break; 2912 case TIPC_DEST_DROPPABLE: 2913 tsk_set_unreturnable(tsk, value); 2914 break; 2915 case TIPC_CONN_TIMEOUT: 2916 tipc_sk(sk)->conn_timeout = value; 2917 break; 2918 case TIPC_MCAST_BROADCAST: 2919 tsk->mc_method.rcast = false; 2920 tsk->mc_method.mandatory = true; 2921 break; 2922 case TIPC_MCAST_REPLICAST: 2923 tsk->mc_method.rcast = true; 2924 tsk->mc_method.mandatory = true; 2925 break; 2926 case TIPC_GROUP_JOIN: 2927 res = tipc_sk_join(tsk, &mreq); 2928 break; 2929 case TIPC_GROUP_LEAVE: 2930 res = tipc_sk_leave(tsk); 2931 break; 2932 default: 2933 res = -EINVAL; 2934 } 2935 2936 release_sock(sk); 2937 2938 return res; 2939 } 2940 2941 /** 2942 * tipc_getsockopt - get socket option 2943 * @sock: socket structure 2944 * @lvl: option level 2945 * @opt: option identifier 2946 * @ov: receptacle for option value 2947 * @ol: receptacle for length of option value 2948 * 2949 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2950 * (to ease compatibility). 
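 *
 * Illustrative user-space sketch (not part of this file; "fd" is a
 * hypothetical AF_TIPC socket descriptor) reading the current receive
 * queue depth as reported below:
 *
 *	__u32 depth = 0;
 *	socklen_t len = sizeof(depth);
 *	getsockopt(fd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);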
2951 * 2952 * Returns 0 on success, errno otherwise 2953 */ 2954 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2955 char __user *ov, int __user *ol) 2956 { 2957 struct sock *sk = sock->sk; 2958 struct tipc_sock *tsk = tipc_sk(sk); 2959 struct tipc_name_seq seq; 2960 int len, scope; 2961 u32 value; 2962 int res; 2963 2964 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2965 return put_user(0, ol); 2966 if (lvl != SOL_TIPC) 2967 return -ENOPROTOOPT; 2968 res = get_user(len, ol); 2969 if (res) 2970 return res; 2971 2972 lock_sock(sk); 2973 2974 switch (opt) { 2975 case TIPC_IMPORTANCE: 2976 value = tsk_importance(tsk); 2977 break; 2978 case TIPC_SRC_DROPPABLE: 2979 value = tsk_unreliable(tsk); 2980 break; 2981 case TIPC_DEST_DROPPABLE: 2982 value = tsk_unreturnable(tsk); 2983 break; 2984 case TIPC_CONN_TIMEOUT: 2985 value = tsk->conn_timeout; 2986 /* no need to set "res", since already 0 at this point */ 2987 break; 2988 case TIPC_NODE_RECVQ_DEPTH: 2989 value = 0; /* was tipc_queue_size, now obsolete */ 2990 break; 2991 case TIPC_SOCK_RECVQ_DEPTH: 2992 value = skb_queue_len(&sk->sk_receive_queue); 2993 break; 2994 case TIPC_GROUP_JOIN: 2995 seq.type = 0; 2996 if (tsk->group) 2997 tipc_group_self(tsk->group, &seq, &scope); 2998 value = seq.type; 2999 break; 3000 default: 3001 res = -EINVAL; 3002 } 3003 3004 release_sock(sk); 3005 3006 if (res) 3007 return res; /* "get" failed */ 3008 3009 if (len < sizeof(value)) 3010 return -EINVAL; 3011 3012 if (copy_to_user(ov, &value, sizeof(value))) 3013 return -EFAULT; 3014 3015 return put_user(sizeof(value), ol); 3016 } 3017 3018 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3019 { 3020 struct net *net = sock_net(sock->sk); 3021 struct tipc_sioc_nodeid_req nr = {0}; 3022 struct tipc_sioc_ln_req lnr; 3023 void __user *argp = (void __user *)arg; 3024 3025 switch (cmd) { 3026 case SIOCGETLINKNAME: 3027 if (copy_from_user(&lnr, argp, sizeof(lnr))) 3028 return -EFAULT; 3029 if (!tipc_node_get_linkname(net, 3030 lnr.bearer_id & 0xffff, lnr.peer, 3031 lnr.linkname, TIPC_MAX_LINK_NAME)) { 3032 if (copy_to_user(argp, &lnr, sizeof(lnr))) 3033 return -EFAULT; 3034 return 0; 3035 } 3036 return -EADDRNOTAVAIL; 3037 case SIOCGETNODEID: 3038 if (copy_from_user(&nr, argp, sizeof(nr))) 3039 return -EFAULT; 3040 if (!tipc_node_get_id(net, nr.peer, nr.node_id)) 3041 return -EADDRNOTAVAIL; 3042 if (copy_to_user(argp, &nr, sizeof(nr))) 3043 return -EFAULT; 3044 return 0; 3045 default: 3046 return -ENOIOCTLCMD; 3047 } 3048 } 3049 3050 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 3051 { 3052 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3053 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3054 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3055 3056 tsk1->peer.family = AF_TIPC; 3057 tsk1->peer.addrtype = TIPC_ADDR_ID; 3058 tsk1->peer.scope = TIPC_NODE_SCOPE; 3059 tsk1->peer.addr.id.ref = tsk2->portid; 3060 tsk1->peer.addr.id.node = onode; 3061 tsk2->peer.family = AF_TIPC; 3062 tsk2->peer.addrtype = TIPC_ADDR_ID; 3063 tsk2->peer.scope = TIPC_NODE_SCOPE; 3064 tsk2->peer.addr.id.ref = tsk1->portid; 3065 tsk2->peer.addr.id.node = onode; 3066 3067 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3068 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3069 return 0; 3070 } 3071 3072 /* Protocol switches for the various types of TIPC sockets */ 3073 3074 static const struct proto_ops msg_ops = { 3075 .owner = THIS_MODULE, 3076 .family = AF_TIPC, 3077 .release = tipc_release, 3078 .bind = tipc_bind, 
3079 .connect = tipc_connect, 3080 .socketpair = tipc_socketpair, 3081 .accept = sock_no_accept, 3082 .getname = tipc_getname, 3083 .poll = tipc_poll, 3084 .ioctl = tipc_ioctl, 3085 .listen = sock_no_listen, 3086 .shutdown = tipc_shutdown, 3087 .setsockopt = tipc_setsockopt, 3088 .getsockopt = tipc_getsockopt, 3089 .sendmsg = tipc_sendmsg, 3090 .recvmsg = tipc_recvmsg, 3091 .mmap = sock_no_mmap, 3092 .sendpage = sock_no_sendpage 3093 }; 3094 3095 static const struct proto_ops packet_ops = { 3096 .owner = THIS_MODULE, 3097 .family = AF_TIPC, 3098 .release = tipc_release, 3099 .bind = tipc_bind, 3100 .connect = tipc_connect, 3101 .socketpair = tipc_socketpair, 3102 .accept = tipc_accept, 3103 .getname = tipc_getname, 3104 .poll = tipc_poll, 3105 .ioctl = tipc_ioctl, 3106 .listen = tipc_listen, 3107 .shutdown = tipc_shutdown, 3108 .setsockopt = tipc_setsockopt, 3109 .getsockopt = tipc_getsockopt, 3110 .sendmsg = tipc_send_packet, 3111 .recvmsg = tipc_recvmsg, 3112 .mmap = sock_no_mmap, 3113 .sendpage = sock_no_sendpage 3114 }; 3115 3116 static const struct proto_ops stream_ops = { 3117 .owner = THIS_MODULE, 3118 .family = AF_TIPC, 3119 .release = tipc_release, 3120 .bind = tipc_bind, 3121 .connect = tipc_connect, 3122 .socketpair = tipc_socketpair, 3123 .accept = tipc_accept, 3124 .getname = tipc_getname, 3125 .poll = tipc_poll, 3126 .ioctl = tipc_ioctl, 3127 .listen = tipc_listen, 3128 .shutdown = tipc_shutdown, 3129 .setsockopt = tipc_setsockopt, 3130 .getsockopt = tipc_getsockopt, 3131 .sendmsg = tipc_sendstream, 3132 .recvmsg = tipc_recvstream, 3133 .mmap = sock_no_mmap, 3134 .sendpage = sock_no_sendpage 3135 }; 3136 3137 static const struct net_proto_family tipc_family_ops = { 3138 .owner = THIS_MODULE, 3139 .family = AF_TIPC, 3140 .create = tipc_sk_create 3141 }; 3142 3143 static struct proto tipc_proto = { 3144 .name = "TIPC", 3145 .owner = THIS_MODULE, 3146 .obj_size = sizeof(struct tipc_sock), 3147 .sysctl_rmem = sysctl_tipc_rmem 3148 }; 3149 3150 /** 3151 * tipc_socket_init - initialize TIPC socket interface 3152 * 3153 * Returns 0 on success, errno otherwise 3154 */ 3155 int tipc_socket_init(void) 3156 { 3157 int res; 3158 3159 res = proto_register(&tipc_proto, 1); 3160 if (res) { 3161 pr_err("Failed to register TIPC protocol type\n"); 3162 goto out; 3163 } 3164 3165 res = sock_register(&tipc_family_ops); 3166 if (res) { 3167 pr_err("Failed to register TIPC socket type\n"); 3168 proto_unregister(&tipc_proto); 3169 goto out; 3170 } 3171 out: 3172 return res; 3173 } 3174 3175 /** 3176 * tipc_socket_stop - stop TIPC socket interface 3177 */ 3178 void tipc_socket_stop(void) 3179 { 3180 sock_unregister(tipc_family_ops.family); 3181 proto_unregister(&tipc_proto); 3182 } 3183 3184 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3185 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3186 { 3187 u32 peer_node; 3188 u32 peer_port; 3189 struct nlattr *nest; 3190 3191 peer_node = tsk_peer_node(tsk); 3192 peer_port = tsk_peer_port(tsk); 3193 3194 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3195 3196 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3197 goto msg_full; 3198 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3199 goto msg_full; 3200 3201 if (tsk->conn_type != 0) { 3202 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3203 goto msg_full; 3204 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3205 goto msg_full; 3206 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3207 goto msg_full; 3208 } 3209 nla_nest_end(skb, nest); 3210 3211 return 0; 3212 3213 msg_full: 3214 nla_nest_cancel(skb, nest); 3215 3216 return -EMSGSIZE; 3217 } 3218 3219 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3220 *tsk) 3221 { 3222 struct net *net = sock_net(skb->sk); 3223 struct sock *sk = &tsk->sk; 3224 3225 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3226 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3227 return -EMSGSIZE; 3228 3229 if (tipc_sk_connected(sk)) { 3230 if (__tipc_nl_add_sk_con(skb, tsk)) 3231 return -EMSGSIZE; 3232 } else if (!list_empty(&tsk->publications)) { 3233 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3234 return -EMSGSIZE; 3235 } 3236 return 0; 3237 } 3238 3239 /* Caller should hold socket lock for the passed tipc socket. */ 3240 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3241 struct tipc_sock *tsk) 3242 { 3243 struct nlattr *attrs; 3244 void *hdr; 3245 3246 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3247 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3248 if (!hdr) 3249 goto msg_cancel; 3250 3251 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3252 if (!attrs) 3253 goto genlmsg_cancel; 3254 3255 if (__tipc_nl_add_sk_info(skb, tsk)) 3256 goto attr_msg_cancel; 3257 3258 nla_nest_end(skb, attrs); 3259 genlmsg_end(skb, hdr); 3260 3261 return 0; 3262 3263 attr_msg_cancel: 3264 nla_nest_cancel(skb, attrs); 3265 genlmsg_cancel: 3266 genlmsg_cancel(skb, hdr); 3267 msg_cancel: 3268 return -EMSGSIZE; 3269 } 3270 3271 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3272 int (*skb_handler)(struct sk_buff *skb, 3273 struct netlink_callback *cb, 3274 struct tipc_sock *tsk)) 3275 { 3276 struct rhashtable_iter *iter = (void *)cb->args[4]; 3277 struct tipc_sock *tsk; 3278 int err; 3279 3280 rhashtable_walk_start(iter); 3281 while ((tsk = rhashtable_walk_next(iter)) != NULL) { 3282 if (IS_ERR(tsk)) { 3283 err = PTR_ERR(tsk); 3284 if (err == -EAGAIN) { 3285 err = 0; 3286 continue; 3287 } 3288 break; 3289 } 3290 3291 sock_hold(&tsk->sk); 3292 rhashtable_walk_stop(iter); 3293 lock_sock(&tsk->sk); 3294 err = skb_handler(skb, cb, tsk); 3295 if (err) { 3296 release_sock(&tsk->sk); 3297 sock_put(&tsk->sk); 3298 goto out; 3299 } 3300 release_sock(&tsk->sk); 3301 rhashtable_walk_start(iter); 3302 sock_put(&tsk->sk); 3303 } 3304 rhashtable_walk_stop(iter); 3305 out: 3306 return skb->len; 3307 } 3308 EXPORT_SYMBOL(tipc_nl_sk_walk); 3309 3310 int tipc_dump_start(struct netlink_callback *cb) 3311 { 3312 return __tipc_dump_start(cb, sock_net(cb->skb->sk)); 3313 } 3314 EXPORT_SYMBOL(tipc_dump_start); 3315 3316 int __tipc_dump_start(struct netlink_callback *cb, struct net *net) 3317 { 3318 /* tipc_nl_name_table_dump() uses cb->args[0...3]. 
*/ 3319 struct rhashtable_iter *iter = (void *)cb->args[4]; 3320 struct tipc_net *tn = tipc_net(net); 3321 3322 if (!iter) { 3323 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3324 if (!iter) 3325 return -ENOMEM; 3326 3327 cb->args[4] = (long)iter; 3328 } 3329 3330 rhashtable_walk_enter(&tn->sk_rht, iter); 3331 return 0; 3332 } 3333 3334 int tipc_dump_done(struct netlink_callback *cb) 3335 { 3336 struct rhashtable_iter *hti = (void *)cb->args[4]; 3337 3338 rhashtable_walk_exit(hti); 3339 kfree(hti); 3340 return 0; 3341 } 3342 EXPORT_SYMBOL(tipc_dump_done); 3343 3344 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3345 struct tipc_sock *tsk, u32 sk_filter_state, 3346 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3347 { 3348 struct sock *sk = &tsk->sk; 3349 struct nlattr *attrs; 3350 struct nlattr *stat; 3351 3352 /*filter response w.r.t sk_state*/ 3353 if (!(sk_filter_state & (1 << sk->sk_state))) 3354 return 0; 3355 3356 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3357 if (!attrs) 3358 goto msg_cancel; 3359 3360 if (__tipc_nl_add_sk_info(skb, tsk)) 3361 goto attr_msg_cancel; 3362 3363 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3364 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3365 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3366 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3367 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3368 sock_i_uid(sk))) || 3369 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3370 tipc_diag_gen_cookie(sk), 3371 TIPC_NLA_SOCK_PAD)) 3372 goto attr_msg_cancel; 3373 3374 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3375 if (!stat) 3376 goto attr_msg_cancel; 3377 3378 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3379 skb_queue_len(&sk->sk_receive_queue)) || 3380 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3381 skb_queue_len(&sk->sk_write_queue)) || 3382 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3383 atomic_read(&sk->sk_drops))) 3384 goto stat_msg_cancel; 3385 3386 if (tsk->cong_link_cnt && 3387 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3388 goto stat_msg_cancel; 3389 3390 if (tsk_conn_cong(tsk) && 3391 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3392 goto stat_msg_cancel; 3393 3394 nla_nest_end(skb, stat); 3395 3396 if (tsk->group) 3397 if (tipc_group_fill_sock_diag(tsk->group, skb)) 3398 goto stat_msg_cancel; 3399 3400 nla_nest_end(skb, attrs); 3401 3402 return 0; 3403 3404 stat_msg_cancel: 3405 nla_nest_cancel(skb, stat); 3406 attr_msg_cancel: 3407 nla_nest_cancel(skb, attrs); 3408 msg_cancel: 3409 return -EMSGSIZE; 3410 } 3411 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3412 3413 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3414 { 3415 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3416 } 3417 3418 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3419 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3420 struct netlink_callback *cb, 3421 struct publication *publ) 3422 { 3423 void *hdr; 3424 struct nlattr *attrs; 3425 3426 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3427 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3428 if (!hdr) 3429 goto msg_cancel; 3430 3431 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3432 if (!attrs) 3433 goto genlmsg_cancel; 3434 3435 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3436 goto attr_msg_cancel; 3437 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3438 goto attr_msg_cancel; 3439 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3440 goto attr_msg_cancel; 3441 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3442 goto attr_msg_cancel; 3443 3444 nla_nest_end(skb, attrs); 3445 genlmsg_end(skb, hdr); 3446 3447 return 0; 3448 3449 attr_msg_cancel: 3450 nla_nest_cancel(skb, attrs); 3451 genlmsg_cancel: 3452 genlmsg_cancel(skb, hdr); 3453 msg_cancel: 3454 return -EMSGSIZE; 3455 } 3456 3457 /* Caller should hold socket lock for the passed tipc socket. */ 3458 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3459 struct netlink_callback *cb, 3460 struct tipc_sock *tsk, u32 *last_publ) 3461 { 3462 int err; 3463 struct publication *p; 3464 3465 if (*last_publ) { 3466 list_for_each_entry(p, &tsk->publications, binding_sock) { 3467 if (p->key == *last_publ) 3468 break; 3469 } 3470 if (p->key != *last_publ) { 3471 /* We never set seq or call nl_dump_check_consistent() 3472 * this means that setting prev_seq here will cause the 3473 * consistence check to fail in the netlink callback 3474 * handler. Resulting in the last NLMSG_DONE message 3475 * having the NLM_F_DUMP_INTR flag set. 3476 */ 3477 cb->prev_seq = 1; 3478 *last_publ = 0; 3479 return -EPIPE; 3480 } 3481 } else { 3482 p = list_first_entry(&tsk->publications, struct publication, 3483 binding_sock); 3484 } 3485 3486 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3487 err = __tipc_nl_add_sk_publ(skb, cb, p); 3488 if (err) { 3489 *last_publ = p->key; 3490 return err; 3491 } 3492 } 3493 *last_publ = 0; 3494 3495 return 0; 3496 } 3497 3498 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3499 { 3500 int err; 3501 u32 tsk_portid = cb->args[0]; 3502 u32 last_publ = cb->args[1]; 3503 u32 done = cb->args[2]; 3504 struct net *net = sock_net(skb->sk); 3505 struct tipc_sock *tsk; 3506 3507 if (!tsk_portid) { 3508 struct nlattr **attrs; 3509 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3510 3511 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3512 if (err) 3513 return err; 3514 3515 if (!attrs[TIPC_NLA_SOCK]) 3516 return -EINVAL; 3517 3518 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3519 attrs[TIPC_NLA_SOCK], 3520 tipc_nl_sock_policy, NULL); 3521 if (err) 3522 return err; 3523 3524 if (!sock[TIPC_NLA_SOCK_REF]) 3525 return -EINVAL; 3526 3527 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3528 } 3529 3530 if (done) 3531 return 0; 3532 3533 tsk = tipc_sk_lookup(net, tsk_portid); 3534 if (!tsk) 3535 return -EINVAL; 3536 3537 lock_sock(&tsk->sk); 3538 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3539 if (!err) 3540 done = 1; 3541 release_sock(&tsk->sk); 3542 sock_put(&tsk->sk); 3543 3544 cb->args[0] = tsk_portid; 3545 cb->args[1] = last_publ; 3546 cb->args[2] = done; 3547 3548 return skb->len; 3549 } 3550