1 /* 2 * net/tipc/socket.c: TIPC socket API 3 * 4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include <linux/rhashtable.h> 38 #include <linux/sched/signal.h> 39 40 #include "core.h" 41 #include "name_table.h" 42 #include "node.h" 43 #include "link.h" 44 #include "name_distr.h" 45 #include "socket.h" 46 #include "bcast.h" 47 #include "netlink.h" 48 #include "group.h" 49 50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 51 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */ 52 #define TIPC_FWD_MSG 1 53 #define TIPC_MAX_PORT 0xffffffff 54 #define TIPC_MIN_PORT 1 55 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */ 56 57 enum { 58 TIPC_LISTEN = TCP_LISTEN, 59 TIPC_ESTABLISHED = TCP_ESTABLISHED, 60 TIPC_OPEN = TCP_CLOSE, 61 TIPC_DISCONNECTING = TCP_CLOSE_WAIT, 62 TIPC_CONNECTING = TCP_SYN_SENT, 63 }; 64 65 struct sockaddr_pair { 66 struct sockaddr_tipc sock; 67 struct sockaddr_tipc member; 68 }; 69 70 /** 71 * struct tipc_sock - TIPC socket structure 72 * @sk: socket - interacts with 'port' and with user via the socket API 73 * @conn_type: TIPC type used when connection was established 74 * @conn_instance: TIPC instance used when connection was established 75 * @published: non-zero if port has one or more associated names 76 * @max_pkt: maximum packet size "hint" used when building messages sent by port 77 * @portid: unique port identity in TIPC socket hash table 78 * @phdr: preformatted message header used when sending messages 79 * @cong_links: list of congested links 80 * @publications: list of publications for port 81 * @blocking_link: address of the congested link we are currently sleeping on 82 * @pub_count: total # of publications port has made during its lifetime 83 * @probe_unacked: probe has not received ack yet 84 * @conn_timeout: the time we can wait for an unresponded setup request 85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue 86 * @cong_link_cnt: number of congested links 87 * @snt_unacked: # messages sent by socket, and not yet acked by peer 88 * @rcv_unacked: # messages read by user, but not yet acked back to peer 89 * @peer: 'connected' peer for dgram/rdm 90 * @node: hash table node 91 * @mc_method: cookie for use between socket and broadcast layer 92 * @rcu: rcu struct for tipc_sock 93 */ 94 struct tipc_sock { 95 struct sock sk; 96 u32 conn_type; 97 u32 conn_instance; 98 int published; 99 u32 max_pkt; 100 u32 portid; 101 struct tipc_msg phdr; 102 struct list_head cong_links; 103 struct list_head publications; 104 u32 pub_count; 105 uint conn_timeout; 106 atomic_t dupl_rcvcnt; 107 bool probe_unacked; 108 u16 cong_link_cnt; 109 u16 snt_unacked; 110 u16 snd_win; 111 u16 peer_caps; 112 u16 rcv_unacked; 113 u16 rcv_win; 114 struct sockaddr_tipc peer; 115 struct rhash_head node; 116 struct tipc_mc_method mc_method; 117 struct rcu_head rcu; 118 struct tipc_group *group; 119 bool group_is_open; 120 }; 121 122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); 123 static void tipc_data_ready(struct sock *sk); 124 static void tipc_write_space(struct sock *sk); 125 static void tipc_sock_destruct(struct sock *sk); 126 static int tipc_release(struct socket *sock); 127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 128 bool kern); 129 static void tipc_sk_timeout(struct timer_list *t); 130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 131 struct tipc_name_seq const *seq); 132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 133 struct tipc_name_seq const *seq); 134 static int tipc_sk_leave(struct tipc_sock *tsk); 135 static struct
tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); 136 static int tipc_sk_insert(struct tipc_sock *tsk); 137 static void tipc_sk_remove(struct tipc_sock *tsk); 138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); 139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); 140 141 static const struct proto_ops packet_ops; 142 static const struct proto_ops stream_ops; 143 static const struct proto_ops msg_ops; 144 static struct proto tipc_proto; 145 static const struct rhashtable_params tsk_rht_params; 146 147 static u32 tsk_own_node(struct tipc_sock *tsk) 148 { 149 return msg_prevnode(&tsk->phdr); 150 } 151 152 static u32 tsk_peer_node(struct tipc_sock *tsk) 153 { 154 return msg_destnode(&tsk->phdr); 155 } 156 157 static u32 tsk_peer_port(struct tipc_sock *tsk) 158 { 159 return msg_destport(&tsk->phdr); 160 } 161 162 static bool tsk_unreliable(struct tipc_sock *tsk) 163 { 164 return msg_src_droppable(&tsk->phdr) != 0; 165 } 166 167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) 168 { 169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); 170 } 171 172 static bool tsk_unreturnable(struct tipc_sock *tsk) 173 { 174 return msg_dest_droppable(&tsk->phdr) != 0; 175 } 176 177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) 178 { 179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0); 180 } 181 182 static int tsk_importance(struct tipc_sock *tsk) 183 { 184 return msg_importance(&tsk->phdr); 185 } 186 187 static int tsk_set_importance(struct tipc_sock *tsk, int imp) 188 { 189 if (imp > TIPC_CRITICAL_IMPORTANCE) 190 return -EINVAL; 191 msg_set_importance(&tsk->phdr, (u32)imp); 192 return 0; 193 } 194 195 static struct tipc_sock *tipc_sk(const struct sock *sk) 196 { 197 return container_of(sk, struct tipc_sock, sk); 198 } 199 200 static bool tsk_conn_cong(struct tipc_sock *tsk) 201 { 202 return tsk->snt_unacked > tsk->snd_win; 203 } 204 205 static u16 tsk_blocks(int len) 206 { 207 return ((len / FLOWCTL_BLK_SZ) + 1); 208 } 209 210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of 211 * advertisable blocks, taking into account the ratio truesize(len)/len 212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ 213 */ 214 static u16 tsk_adv_blocks(int len) 215 { 216 return len / FLOWCTL_BLK_SZ / 4; 217 } 218 219 /* tsk_inc(): increment counter for sent or received data 220 * - If block based flow control is not supported by peer we 221 * fall back to message based ditto, incrementing the counter 222 */ 223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen) 224 { 225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 226 return ((msglen / FLOWCTL_BLK_SZ) + 1); 227 return 1; 228 } 229 230 /** 231 * tsk_advance_rx_queue - discard first buffer in socket receive queue 232 * 233 * Caller must hold socket lock 234 */ 235 static void tsk_advance_rx_queue(struct sock *sk) 236 { 237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 238 } 239 240 /* tipc_sk_respond() : send response message back to sender 241 */ 242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) 243 { 244 u32 selector; 245 u32 dnode; 246 u32 onode = tipc_own_addr(sock_net(sk)); 247 248 if (!tipc_msg_reverse(onode, &skb, err)) 249 return; 250 251 dnode = msg_destnode(buf_msg(skb)); 252 selector = msg_origport(buf_msg(skb)); 253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 254 } 255 256 /** 257 * tsk_rej_rx_queue - reject all buffers in socket receive queue 258 *
259 * Caller must hold socket lock 260 */ 261 static void tsk_rej_rx_queue(struct sock *sk) 262 { 263 struct sk_buff *skb; 264 265 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 267 } 268 269 static bool tipc_sk_connected(struct sock *sk) 270 { 271 return sk->sk_state == TIPC_ESTABLISHED; 272 } 273 274 /* tipc_sk_type_connectionless - check if the socket is datagram socket 275 * @sk: socket 276 * 277 * Returns true if connection less, false otherwise 278 */ 279 static bool tipc_sk_type_connectionless(struct sock *sk) 280 { 281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; 282 } 283 284 /* tsk_peer_msg - verify if message was sent by connected port's peer 285 * 286 * Handles cases where the node's network address has changed from 287 * the default of <0.0.0> to its configured setting. 288 */ 289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 290 { 291 struct sock *sk = &tsk->sk; 292 u32 self = tipc_own_addr(sock_net(sk)); 293 u32 peer_port = tsk_peer_port(tsk); 294 u32 orig_node, peer_node; 295 296 if (unlikely(!tipc_sk_connected(sk))) 297 return false; 298 299 if (unlikely(msg_origport(msg) != peer_port)) 300 return false; 301 302 orig_node = msg_orignode(msg); 303 peer_node = tsk_peer_node(tsk); 304 305 if (likely(orig_node == peer_node)) 306 return true; 307 308 if (!orig_node && peer_node == self) 309 return true; 310 311 if (!peer_node && orig_node == self) 312 return true; 313 314 return false; 315 } 316 317 /* tipc_set_sk_state - set the sk_state of the socket 318 * @sk: socket 319 * 320 * Caller must hold socket lock 321 * 322 * Returns 0 on success, errno otherwise 323 */ 324 static int tipc_set_sk_state(struct sock *sk, int state) 325 { 326 int oldsk_state = sk->sk_state; 327 int res = -EINVAL; 328 329 switch (state) { 330 case TIPC_OPEN: 331 res = 0; 332 break; 333 case TIPC_LISTEN: 334 case TIPC_CONNECTING: 335 if (oldsk_state == TIPC_OPEN) 336 res = 0; 337 break; 338 case TIPC_ESTABLISHED: 339 if (oldsk_state == TIPC_CONNECTING || 340 oldsk_state == TIPC_OPEN) 341 res = 0; 342 break; 343 case TIPC_DISCONNECTING: 344 if (oldsk_state == TIPC_CONNECTING || 345 oldsk_state == TIPC_ESTABLISHED) 346 res = 0; 347 break; 348 } 349 350 if (!res) 351 sk->sk_state = state; 352 353 return res; 354 } 355 356 static int tipc_sk_sock_err(struct socket *sock, long *timeout) 357 { 358 struct sock *sk = sock->sk; 359 int err = sock_error(sk); 360 int typ = sock->type; 361 362 if (err) 363 return err; 364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { 365 if (sk->sk_state == TIPC_DISCONNECTING) 366 return -EPIPE; 367 else if (!tipc_sk_connected(sk)) 368 return -ENOTCONN; 369 } 370 if (!*timeout) 371 return -EAGAIN; 372 if (signal_pending(current)) 373 return sock_intr_errno(*timeout); 374 375 return 0; 376 } 377 378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 379 ({ \ 380 struct sock *sk_; \ 381 int rc_; \ 382 \ 383 while ((rc_ = !(condition_))) { \ 384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 385 sk_ = (sock_)->sk; \ 386 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 387 if (rc_) \ 388 break; \ 389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 390 release_sock(sk_); \ 391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 392 sched_annotate_sleep(); \ 393 lock_sock(sk_); \ 394 remove_wait_queue(sk_sleep(sk_), &wait_); \ 395 } \ 396 rc_; \ 397 }) 398 399 /** 400 * tipc_sk_create - create a TIPC socket 401 * @net: network namespace (must be default network) 
402 * @sock: pre-allocated socket structure 403 * @protocol: protocol indicator (must be 0) 404 * @kern: caused by kernel or by userspace? 405 * 406 * This routine creates additional data structures used by the TIPC socket, 407 * initializes them, and links them together. 408 * 409 * Returns 0 on success, errno otherwise 410 */ 411 static int tipc_sk_create(struct net *net, struct socket *sock, 412 int protocol, int kern) 413 { 414 struct tipc_net *tn; 415 const struct proto_ops *ops; 416 struct sock *sk; 417 struct tipc_sock *tsk; 418 struct tipc_msg *msg; 419 420 /* Validate arguments */ 421 if (unlikely(protocol != 0)) 422 return -EPROTONOSUPPORT; 423 424 switch (sock->type) { 425 case SOCK_STREAM: 426 ops = &stream_ops; 427 break; 428 case SOCK_SEQPACKET: 429 ops = &packet_ops; 430 break; 431 case SOCK_DGRAM: 432 case SOCK_RDM: 433 ops = &msg_ops; 434 break; 435 default: 436 return -EPROTOTYPE; 437 } 438 439 /* Allocate socket's protocol area */ 440 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 441 if (sk == NULL) 442 return -ENOMEM; 443 444 tsk = tipc_sk(sk); 445 tsk->max_pkt = MAX_PKT_DEFAULT; 446 INIT_LIST_HEAD(&tsk->publications); 447 INIT_LIST_HEAD(&tsk->cong_links); 448 msg = &tsk->phdr; 449 tn = net_generic(sock_net(sk), tipc_net_id); 450 451 /* Finish initializing socket data structures */ 452 sock->ops = ops; 453 sock_init_data(sock, sk); 454 tipc_set_sk_state(sk, TIPC_OPEN); 455 if (tipc_sk_insert(tsk)) { 456 pr_warn("Socket create failed; port number exhausted\n"); 457 return -EINVAL; 458 } 459 460 /* Ensure tsk is visible before we read own_addr. */ 461 smp_mb(); 462 463 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE, 464 TIPC_NAMED_MSG, NAMED_H_SIZE, 0); 465 466 msg_set_origport(msg, tsk->portid); 467 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); 468 sk->sk_shutdown = 0; 469 sk->sk_backlog_rcv = tipc_sk_backlog_rcv; 470 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 471 sk->sk_data_ready = tipc_data_ready; 472 sk->sk_write_space = tipc_write_space; 473 sk->sk_destruct = tipc_sock_destruct; 474 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 475 tsk->group_is_open = true; 476 atomic_set(&tsk->dupl_rcvcnt, 0); 477 478 /* Start out with safe limits until we receive an advertised window */ 479 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); 480 tsk->rcv_win = tsk->snd_win; 481 482 if (tipc_sk_type_connectionless(sk)) { 483 tsk_set_unreturnable(tsk, true); 484 if (sock->type == SOCK_DGRAM) 485 tsk_set_unreliable(tsk, true); 486 } 487 488 return 0; 489 } 490 491 static void tipc_sk_callback(struct rcu_head *head) 492 { 493 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 494 495 sock_put(&tsk->sk); 496 } 497 498 /* Caller should hold socket lock for the socket. */ 499 static void __tipc_shutdown(struct socket *sock, int error) 500 { 501 struct sock *sk = sock->sk; 502 struct tipc_sock *tsk = tipc_sk(sk); 503 struct net *net = sock_net(sk); 504 long timeout = CONN_TIMEOUT_DEFAULT; 505 u32 dnode = tsk_peer_node(tsk); 506 struct sk_buff *skb; 507 508 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ 509 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 510 !tsk_conn_cong(tsk))); 511 512 /* Reject all unreceived messages, except on an active connection 513 * (which disconnects locally & sends a 'FIN+' to peer). 
514 */ 515 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 516 if (TIPC_SKB_CB(skb)->bytes_read) { 517 kfree_skb(skb); 518 continue; 519 } 520 if (!tipc_sk_type_connectionless(sk) && 521 sk->sk_state != TIPC_DISCONNECTING) { 522 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 523 tipc_node_remove_conn(net, dnode, tsk->portid); 524 } 525 tipc_sk_respond(sk, skb, error); 526 } 527 528 if (tipc_sk_type_connectionless(sk)) 529 return; 530 531 if (sk->sk_state != TIPC_DISCONNECTING) { 532 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 533 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, 534 tsk_own_node(tsk), tsk_peer_port(tsk), 535 tsk->portid, error); 536 if (skb) 537 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 538 tipc_node_remove_conn(net, dnode, tsk->portid); 539 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 540 } 541 } 542 543 /** 544 * tipc_release - destroy a TIPC socket 545 * @sock: socket to destroy 546 * 547 * This routine cleans up any messages that are still queued on the socket. 548 * For DGRAM and RDM socket types, all queued messages are rejected. 549 * For SEQPACKET and STREAM socket types, the first message is rejected 550 * and any others are discarded. (If the first message on a STREAM socket 551 * is partially-read, it is discarded and the next one is rejected instead.) 552 * 553 * NOTE: Rejected messages are not necessarily returned to the sender! They 554 * are returned or discarded according to the "destination droppable" setting 555 * specified for the message by the sender. 556 * 557 * Returns 0 on success, errno otherwise 558 */ 559 static int tipc_release(struct socket *sock) 560 { 561 struct sock *sk = sock->sk; 562 struct tipc_sock *tsk; 563 564 /* 565 * Exit if socket isn't fully initialized (occurs when a failed accept() 566 * releases a pre-allocated child socket that was never used) 567 */ 568 if (sk == NULL) 569 return 0; 570 571 tsk = tipc_sk(sk); 572 lock_sock(sk); 573 574 __tipc_shutdown(sock, TIPC_ERR_NO_PORT); 575 sk->sk_shutdown = SHUTDOWN_MASK; 576 tipc_sk_leave(tsk); 577 tipc_sk_withdraw(tsk, 0, NULL); 578 sk_stop_timer(sk, &sk->sk_timer); 579 tipc_sk_remove(tsk); 580 581 /* Reject any messages that accumulated in backlog queue */ 582 release_sock(sk); 583 tipc_dest_list_purge(&tsk->cong_links); 584 tsk->cong_link_cnt = 0; 585 call_rcu(&tsk->rcu, tipc_sk_callback); 586 sock->sk = NULL; 587 588 return 0; 589 } 590 591 /** 592 * tipc_bind - associate or disassocate TIPC name(s) with a socket 593 * @sock: socket structure 594 * @uaddr: socket address describing name(s) and desired operation 595 * @uaddr_len: size of socket address data structure 596 * 597 * Name and name sequence binding is indicated using a positive scope value; 598 * a negative scope value unbinds the specified name. Specifying no name 599 * (i.e. a socket address length of 0) unbinds all names from the socket. 600 * 601 * Returns 0 on success, errno otherwise 602 * 603 * NOTE: This routine doesn't need to take the socket lock since it doesn't 604 * access any non-constant socket information. 
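 *
 * As a hedged illustration only (not part of this file), a user-space
 * caller publishing and later withdrawing a name sequence could look like
 * the sketch below; service type 1000 and the instance range are arbitrary
 * example values, and the constants come from <linux/tipc.h>:
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *	struct sockaddr_tipc a = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	(publish the name sequence)
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	(negative scope withdraws it)
 *	bind(sd, (struct sockaddr *)&a, 0);		(zero length withdraws all names)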
605 */ 606 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, 607 int uaddr_len) 608 { 609 struct sock *sk = sock->sk; 610 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 611 struct tipc_sock *tsk = tipc_sk(sk); 612 int res = -EINVAL; 613 614 lock_sock(sk); 615 if (unlikely(!uaddr_len)) { 616 res = tipc_sk_withdraw(tsk, 0, NULL); 617 goto exit; 618 } 619 if (tsk->group) { 620 res = -EACCES; 621 goto exit; 622 } 623 if (uaddr_len < sizeof(struct sockaddr_tipc)) { 624 res = -EINVAL; 625 goto exit; 626 } 627 if (addr->family != AF_TIPC) { 628 res = -EAFNOSUPPORT; 629 goto exit; 630 } 631 632 if (addr->addrtype == TIPC_ADDR_NAME) 633 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 634 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 635 res = -EAFNOSUPPORT; 636 goto exit; 637 } 638 639 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 640 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 641 (addr->addr.nameseq.type != TIPC_CFG_SRV)) { 642 res = -EACCES; 643 goto exit; 644 } 645 646 res = (addr->scope >= 0) ? 647 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : 648 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); 649 exit: 650 release_sock(sk); 651 return res; 652 } 653 654 /** 655 * tipc_getname - get port ID of socket or peer socket 656 * @sock: socket structure 657 * @uaddr: area for returned socket address 658 * @uaddr_len: area for returned length of socket address 659 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID 660 * 661 * Returns 0 on success, errno otherwise 662 * 663 * NOTE: This routine doesn't need to take the socket lock since it only 664 * accesses socket information that is unchanging (or which changes in 665 * a completely predictable manner). 666 */ 667 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, 668 int peer) 669 { 670 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 671 struct sock *sk = sock->sk; 672 struct tipc_sock *tsk = tipc_sk(sk); 673 674 memset(addr, 0, sizeof(*addr)); 675 if (peer) { 676 if ((!tipc_sk_connected(sk)) && 677 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING))) 678 return -ENOTCONN; 679 addr->addr.id.ref = tsk_peer_port(tsk); 680 addr->addr.id.node = tsk_peer_node(tsk); 681 } else { 682 addr->addr.id.ref = tsk->portid; 683 addr->addr.id.node = tipc_own_addr(sock_net(sk)); 684 } 685 686 addr->addrtype = TIPC_ADDR_ID; 687 addr->family = AF_TIPC; 688 addr->scope = 0; 689 addr->addr.name.domain = 0; 690 691 return sizeof(*addr); 692 } 693 694 /** 695 * tipc_poll - read and possibly block on pollmask 696 * @file: file structure associated with the socket 697 * @sock: socket for which to calculate the poll bits 698 * @wait: ??? 699 * 700 * Returns pollmask value 701 * 702 * COMMENTARY: 703 * It appears that the usual socket locking mechanisms are not useful here 704 * since the pollmask info is potentially out-of-date the moment this routine 705 * exits. TCP and other protocols seem to rely on higher level poll routines 706 * to handle any preventable race conditions, so TIPC will do the same ... 707 * 708 * IMPORTANT: The fact that a read or write operation is indicated does NOT 709 * imply that the operation will succeed, merely that it should be performed 710 * and will not block. 
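 *
 * For orientation only, a minimal user-space consumer of these bits;
 * 'sd' is an assumed, already created TIPC socket descriptor and the
 * handle_*() calls are hypothetical application callbacks, not TIPC API:
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			handle_shutdown(sd);	(connection no longer usable)
 *		if (pfd.revents & POLLIN)
 *			handle_readable(sd);	(receive queue is non-empty)
 *		if (pfd.revents & POLLOUT)
 *			handle_writable(sd);	(no congestion at this instant)
 *	}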
711 */ 712 static __poll_t tipc_poll(struct file *file, struct socket *sock, 713 poll_table *wait) 714 { 715 struct sock *sk = sock->sk; 716 struct tipc_sock *tsk = tipc_sk(sk); 717 __poll_t revents = 0; 718 719 sock_poll_wait(file, sk_sleep(sk), wait); 720 721 if (sk->sk_shutdown & RCV_SHUTDOWN) 722 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 723 if (sk->sk_shutdown == SHUTDOWN_MASK) 724 revents |= EPOLLHUP; 725 726 switch (sk->sk_state) { 727 case TIPC_ESTABLISHED: 728 case TIPC_CONNECTING: 729 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 730 revents |= EPOLLOUT; 731 /* fall thru' */ 732 case TIPC_LISTEN: 733 if (!skb_queue_empty(&sk->sk_receive_queue)) 734 revents |= EPOLLIN | EPOLLRDNORM; 735 break; 736 case TIPC_OPEN: 737 if (tsk->group_is_open && !tsk->cong_link_cnt) 738 revents |= EPOLLOUT; 739 if (!tipc_sk_type_connectionless(sk)) 740 break; 741 if (skb_queue_empty(&sk->sk_receive_queue)) 742 break; 743 revents |= EPOLLIN | EPOLLRDNORM; 744 break; 745 case TIPC_DISCONNECTING: 746 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 747 break; 748 } 749 return revents; 750 } 751 752 /** 753 * tipc_sendmcast - send multicast message 754 * @sock: socket structure 755 * @seq: destination address 756 * @msg: message to send 757 * @dlen: length of data to send 758 * @timeout: timeout to wait for wakeup 759 * 760 * Called from function tipc_sendmsg(), which has done all sanity checks 761 * Returns the number of bytes sent on success, or errno 762 */ 763 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 764 struct msghdr *msg, size_t dlen, long timeout) 765 { 766 struct sock *sk = sock->sk; 767 struct tipc_sock *tsk = tipc_sk(sk); 768 struct tipc_msg *hdr = &tsk->phdr; 769 struct net *net = sock_net(sk); 770 int mtu = tipc_bcast_get_mtu(net); 771 struct tipc_mc_method *method = &tsk->mc_method; 772 struct sk_buff_head pkts; 773 struct tipc_nlist dsts; 774 int rc; 775 776 if (tsk->group) 777 return -EACCES; 778 779 /* Block or return if any destination link is congested */ 780 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); 781 if (unlikely(rc)) 782 return rc; 783 784 /* Lookup destination nodes */ 785 tipc_nlist_init(&dsts, tipc_own_addr(net)); 786 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, 787 seq->upper, &dsts); 788 if (!dsts.local && !dsts.remote) 789 return -EHOSTUNREACH; 790 791 /* Build message header */ 792 msg_set_type(hdr, TIPC_MCAST_MSG); 793 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 794 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 795 msg_set_destport(hdr, 0); 796 msg_set_destnode(hdr, 0); 797 msg_set_nametype(hdr, seq->type); 798 msg_set_namelower(hdr, seq->lower); 799 msg_set_nameupper(hdr, seq->upper); 800 801 /* Build message as chain of buffers */ 802 skb_queue_head_init(&pkts); 803 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 804 805 /* Send message if build was successful */ 806 if (unlikely(rc == dlen)) 807 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 808 &tsk->cong_link_cnt); 809 810 tipc_nlist_purge(&dsts); 811 812 return rc ? 
rc : dlen; 813 } 814 815 /** 816 * tipc_send_group_msg - send a message to a member in the group 817 * @net: network namespace 818 * @m: message to send 819 * @mb: group member 820 * @dnode: destination node 821 * @dport: destination port 822 * @dlen: total length of message data 823 */ 824 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 825 struct msghdr *m, struct tipc_member *mb, 826 u32 dnode, u32 dport, int dlen) 827 { 828 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 829 struct tipc_mc_method *method = &tsk->mc_method; 830 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 831 struct tipc_msg *hdr = &tsk->phdr; 832 struct sk_buff_head pkts; 833 int mtu, rc; 834 835 /* Complete message header */ 836 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 837 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 838 msg_set_destport(hdr, dport); 839 msg_set_destnode(hdr, dnode); 840 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 841 842 /* Build message as chain of buffers */ 843 skb_queue_head_init(&pkts); 844 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 845 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 846 if (unlikely(rc != dlen)) 847 return rc; 848 849 /* Send message */ 850 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 851 if (unlikely(rc == -ELINKCONG)) { 852 tipc_dest_push(&tsk->cong_links, dnode, 0); 853 tsk->cong_link_cnt++; 854 } 855 856 /* Update send window */ 857 tipc_group_update_member(mb, blks); 858 859 /* A broadcast sent within next EXPIRE period must follow same path */ 860 method->rcast = true; 861 method->mandatory = true; 862 return dlen; 863 } 864 865 /** 866 * tipc_send_group_unicast - send message to a member in the group 867 * @sock: socket structure 868 * @m: message to send 869 * @dlen: total length of message data 870 * @timeout: timeout to wait for wakeup 871 * 872 * Called from function tipc_sendmsg(), which has done all sanity checks 873 * Returns the number of bytes sent on success, or errno 874 */ 875 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 876 int dlen, long timeout) 877 { 878 struct sock *sk = sock->sk; 879 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 880 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 881 struct tipc_sock *tsk = tipc_sk(sk); 882 struct tipc_group *grp = tsk->group; 883 struct net *net = sock_net(sk); 884 struct tipc_member *mb = NULL; 885 u32 node, port; 886 int rc; 887 888 node = dest->addr.id.node; 889 port = dest->addr.id.ref; 890 if (!port && !node) 891 return -EHOSTUNREACH; 892 893 /* Block or return if destination link or member is congested */ 894 rc = tipc_wait_for_cond(sock, &timeout, 895 !tipc_dest_find(&tsk->cong_links, node, 0) && 896 !tipc_group_cong(grp, node, port, blks, &mb)); 897 if (unlikely(rc)) 898 return rc; 899 900 if (unlikely(!mb)) 901 return -EHOSTUNREACH; 902 903 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 904 905 return rc ? 
rc : dlen; 906 } 907 908 /** 909 * tipc_send_group_anycast - send message to any member with given identity 910 * @sock: socket structure 911 * @m: message to send 912 * @dlen: total length of message data 913 * @timeout: timeout to wait for wakeup 914 * 915 * Called from function tipc_sendmsg(), which has done all sanity checks 916 * Returns the number of bytes sent on success, or errno 917 */ 918 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 919 int dlen, long timeout) 920 { 921 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 922 struct sock *sk = sock->sk; 923 struct tipc_sock *tsk = tipc_sk(sk); 924 struct list_head *cong_links = &tsk->cong_links; 925 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 926 struct tipc_group *grp = tsk->group; 927 struct tipc_msg *hdr = &tsk->phdr; 928 struct tipc_member *first = NULL; 929 struct tipc_member *mbr = NULL; 930 struct net *net = sock_net(sk); 931 u32 node, port, exclude; 932 struct list_head dsts; 933 u32 type, inst, scope; 934 int lookups = 0; 935 int dstcnt, rc; 936 bool cong; 937 938 INIT_LIST_HEAD(&dsts); 939 940 type = msg_nametype(hdr); 941 inst = dest->addr.name.name.instance; 942 scope = msg_lookup_scope(hdr); 943 exclude = tipc_group_exclude(grp); 944 945 while (++lookups < 4) { 946 first = NULL; 947 948 /* Look for a non-congested destination member, if any */ 949 while (1) { 950 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 951 &dstcnt, exclude, false)) 952 return -EHOSTUNREACH; 953 tipc_dest_pop(&dsts, &node, &port); 954 cong = tipc_group_cong(grp, node, port, blks, &mbr); 955 if (!cong) 956 break; 957 if (mbr == first) 958 break; 959 if (!first) 960 first = mbr; 961 } 962 963 /* Start over if destination was not in member list */ 964 if (unlikely(!mbr)) 965 continue; 966 967 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 968 break; 969 970 /* Block or return if destination link or member is congested */ 971 rc = tipc_wait_for_cond(sock, &timeout, 972 !tipc_dest_find(cong_links, node, 0) && 973 !tipc_group_cong(grp, node, port, 974 blks, &mbr)); 975 if (unlikely(rc)) 976 return rc; 977 978 /* Send, unless destination disappeared while waiting */ 979 if (likely(mbr)) 980 break; 981 } 982 983 if (unlikely(lookups >= 4)) 984 return -EHOSTUNREACH; 985 986 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 987 988 return rc ? 
rc : dlen; 989 } 990 991 /** 992 * tipc_send_group_bcast - send message to all members in communication group 993 * @sk: socket structure 994 * @m: message to send 995 * @dlen: total length of message data 996 * @timeout: timeout to wait for wakeup 997 * 998 * Called from function tipc_sendmsg(), which has done all sanity checks 999 * Returns the number of bytes sent on success, or errno 1000 */ 1001 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 1002 int dlen, long timeout) 1003 { 1004 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1005 struct sock *sk = sock->sk; 1006 struct net *net = sock_net(sk); 1007 struct tipc_sock *tsk = tipc_sk(sk); 1008 struct tipc_group *grp = tsk->group; 1009 struct tipc_nlist *dsts = tipc_group_dests(grp); 1010 struct tipc_mc_method *method = &tsk->mc_method; 1011 bool ack = method->mandatory && method->rcast; 1012 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1013 struct tipc_msg *hdr = &tsk->phdr; 1014 int mtu = tipc_bcast_get_mtu(net); 1015 struct sk_buff_head pkts; 1016 int rc = -EHOSTUNREACH; 1017 1018 if (!dsts->local && !dsts->remote) 1019 return -EHOSTUNREACH; 1020 1021 /* Block or return if any destination link or member is congested */ 1022 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1023 !tipc_group_bc_cong(grp, blks)); 1024 if (unlikely(rc)) 1025 return rc; 1026 1027 /* Complete message header */ 1028 if (dest) { 1029 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1030 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1031 } else { 1032 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1033 msg_set_nameinst(hdr, 0); 1034 } 1035 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1036 msg_set_destport(hdr, 0); 1037 msg_set_destnode(hdr, 0); 1038 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1039 1040 /* Avoid getting stuck with repeated forced replicasts */ 1041 msg_set_grp_bc_ack_req(hdr, ack); 1042 1043 /* Build message as chain of buffers */ 1044 skb_queue_head_init(&pkts); 1045 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1046 if (unlikely(rc != dlen)) 1047 return rc; 1048 1049 /* Send message */ 1050 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1051 if (unlikely(rc)) 1052 return rc; 1053 1054 /* Update broadcast sequence number and send windows */ 1055 tipc_group_update_bc_members(tsk->group, blks, ack); 1056 1057 /* Broadcast link is now free to choose method for next broadcast */ 1058 method->mandatory = false; 1059 method->expires = jiffies; 1060 1061 return dlen; 1062 } 1063 1064 /** 1065 * tipc_send_group_mcast - send message to all members with given identity 1066 * @sock: socket structure 1067 * @m: message to send 1068 * @dlen: total length of message data 1069 * @timeout: timeout to wait for wakeup 1070 * 1071 * Called from function tipc_sendmsg(), which has done all sanity checks 1072 * Returns the number of bytes sent on success, or errno 1073 */ 1074 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1075 int dlen, long timeout) 1076 { 1077 struct sock *sk = sock->sk; 1078 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1079 struct tipc_sock *tsk = tipc_sk(sk); 1080 struct tipc_group *grp = tsk->group; 1081 struct tipc_msg *hdr = &tsk->phdr; 1082 struct net *net = sock_net(sk); 1083 u32 type, inst, scope, exclude; 1084 struct list_head dsts; 1085 u32 dstcnt; 1086 1087 INIT_LIST_HEAD(&dsts); 1088 1089 type = msg_nametype(hdr); 1090 inst = dest->addr.name.name.instance; 1091 scope = msg_lookup_scope(hdr); 1092 exclude = 
tipc_group_exclude(grp); 1093 1094 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 1095 &dstcnt, exclude, true)) 1096 return -EHOSTUNREACH; 1097 1098 if (dstcnt == 1) { 1099 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); 1100 return tipc_send_group_unicast(sock, m, dlen, timeout); 1101 } 1102 1103 tipc_dest_list_purge(&dsts); 1104 return tipc_send_group_bcast(sock, m, dlen, timeout); 1105 } 1106 1107 /** 1108 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 1109 * @arrvq: queue with arriving messages, to be cloned after destination lookup 1110 * @inputq: queue with cloned messages, delivered to socket after dest lookup 1111 * 1112 * Multi-threaded: parallel calls with reference to same queues may occur 1113 */ 1114 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 1115 struct sk_buff_head *inputq) 1116 { 1117 u32 self = tipc_own_addr(net); 1118 u32 type, lower, upper, scope; 1119 struct sk_buff *skb, *_skb; 1120 u32 portid, oport, onode; 1121 struct sk_buff_head tmpq; 1122 struct list_head dports; 1123 struct tipc_msg *hdr; 1124 int user, mtyp, hlen; 1125 bool exact; 1126 1127 __skb_queue_head_init(&tmpq); 1128 INIT_LIST_HEAD(&dports); 1129 1130 skb = tipc_skb_peek(arrvq, &inputq->lock); 1131 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 1132 hdr = buf_msg(skb); 1133 user = msg_user(hdr); 1134 mtyp = msg_type(hdr); 1135 hlen = skb_headroom(skb) + msg_hdr_sz(hdr); 1136 oport = msg_origport(hdr); 1137 onode = msg_orignode(hdr); 1138 type = msg_nametype(hdr); 1139 1140 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { 1141 spin_lock_bh(&inputq->lock); 1142 if (skb_peek(arrvq) == skb) { 1143 __skb_dequeue(arrvq); 1144 __skb_queue_tail(inputq, skb); 1145 } 1146 kfree_skb(skb); 1147 spin_unlock_bh(&inputq->lock); 1148 continue; 1149 } 1150 1151 /* Group messages require exact scope match */ 1152 if (msg_in_group(hdr)) { 1153 lower = 0; 1154 upper = ~0; 1155 scope = msg_lookup_scope(hdr); 1156 exact = true; 1157 } else { 1158 /* TIPC_NODE_SCOPE means "any scope" in this context */ 1159 if (onode == self) 1160 scope = TIPC_NODE_SCOPE; 1161 else 1162 scope = TIPC_CLUSTER_SCOPE; 1163 exact = false; 1164 lower = msg_namelower(hdr); 1165 upper = msg_nameupper(hdr); 1166 } 1167 1168 /* Create destination port list: */ 1169 tipc_nametbl_mc_lookup(net, type, lower, upper, 1170 scope, exact, &dports); 1171 1172 /* Clone message per destination */ 1173 while (tipc_dest_pop(&dports, NULL, &portid)) { 1174 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); 1175 if (_skb) { 1176 msg_set_destport(buf_msg(_skb), portid); 1177 __skb_queue_tail(&tmpq, _skb); 1178 continue; 1179 } 1180 pr_warn("Failed to clone mcast rcv buffer\n"); 1181 } 1182 /* Append to inputq if not already done by other thread */ 1183 spin_lock_bh(&inputq->lock); 1184 if (skb_peek(arrvq) == skb) { 1185 skb_queue_splice_tail_init(&tmpq, inputq); 1186 kfree_skb(__skb_dequeue(arrvq)); 1187 } 1188 spin_unlock_bh(&inputq->lock); 1189 __skb_queue_purge(&tmpq); 1190 kfree_skb(skb); 1191 } 1192 tipc_sk_rcv(net, inputq); 1193 } 1194 1195 /** 1196 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message 1197 * @tsk: receiving socket 1198 * @skb: pointer to message buffer. 
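 * @xmitq: queue where protocol replies (e.g. a CONN_PROBE_REPLY) are added
 *         for the caller to transmit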
1199 */ 1200 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 1201 struct sk_buff_head *xmitq) 1202 { 1203 struct tipc_msg *hdr = buf_msg(skb); 1204 u32 onode = tsk_own_node(tsk); 1205 struct sock *sk = &tsk->sk; 1206 int mtyp = msg_type(hdr); 1207 bool conn_cong; 1208 1209 /* Ignore if connection cannot be validated: */ 1210 if (!tsk_peer_msg(tsk, hdr)) 1211 goto exit; 1212 1213 if (unlikely(msg_errcode(hdr))) { 1214 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1215 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 1216 tsk_peer_port(tsk)); 1217 sk->sk_state_change(sk); 1218 goto exit; 1219 } 1220 1221 tsk->probe_unacked = false; 1222 1223 if (mtyp == CONN_PROBE) { 1224 msg_set_type(hdr, CONN_PROBE_REPLY); 1225 if (tipc_msg_reverse(onode, &skb, TIPC_OK)) 1226 __skb_queue_tail(xmitq, skb); 1227 return; 1228 } else if (mtyp == CONN_ACK) { 1229 conn_cong = tsk_conn_cong(tsk); 1230 tsk->snt_unacked -= msg_conn_ack(hdr); 1231 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1232 tsk->snd_win = msg_adv_win(hdr); 1233 if (conn_cong) 1234 sk->sk_write_space(sk); 1235 } else if (mtyp != CONN_PROBE_REPLY) { 1236 pr_warn("Received unknown CONN_PROTO msg\n"); 1237 } 1238 exit: 1239 kfree_skb(skb); 1240 } 1241 1242 /** 1243 * tipc_sendmsg - send message in connectionless manner 1244 * @sock: socket structure 1245 * @m: message to send 1246 * @dsz: amount of user data to be sent 1247 * 1248 * Message must have an destination specified explicitly. 1249 * Used for SOCK_RDM and SOCK_DGRAM messages, 1250 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 1251 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 1252 * 1253 * Returns the number of bytes sent on success, or errno otherwise 1254 */ 1255 static int tipc_sendmsg(struct socket *sock, 1256 struct msghdr *m, size_t dsz) 1257 { 1258 struct sock *sk = sock->sk; 1259 int ret; 1260 1261 lock_sock(sk); 1262 ret = __tipc_sendmsg(sock, m, dsz); 1263 release_sock(sk); 1264 1265 return ret; 1266 } 1267 1268 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) 1269 { 1270 struct sock *sk = sock->sk; 1271 struct net *net = sock_net(sk); 1272 struct tipc_sock *tsk = tipc_sk(sk); 1273 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1274 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1275 struct list_head *clinks = &tsk->cong_links; 1276 bool syn = !tipc_sk_type_connectionless(sk); 1277 struct tipc_group *grp = tsk->group; 1278 struct tipc_msg *hdr = &tsk->phdr; 1279 struct tipc_name_seq *seq; 1280 struct sk_buff_head pkts; 1281 u32 dport, dnode = 0; 1282 u32 type, inst; 1283 int mtu, rc; 1284 1285 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) 1286 return -EMSGSIZE; 1287 1288 if (likely(dest)) { 1289 if (unlikely(m->msg_namelen < sizeof(*dest))) 1290 return -EINVAL; 1291 if (unlikely(dest->family != AF_TIPC)) 1292 return -EINVAL; 1293 } 1294 1295 if (grp) { 1296 if (!dest) 1297 return tipc_send_group_bcast(sock, m, dlen, timeout); 1298 if (dest->addrtype == TIPC_ADDR_NAME) 1299 return tipc_send_group_anycast(sock, m, dlen, timeout); 1300 if (dest->addrtype == TIPC_ADDR_ID) 1301 return tipc_send_group_unicast(sock, m, dlen, timeout); 1302 if (dest->addrtype == TIPC_ADDR_MCAST) 1303 return tipc_send_group_mcast(sock, m, dlen, timeout); 1304 return -EINVAL; 1305 } 1306 1307 if (unlikely(!dest)) { 1308 dest = &tsk->peer; 1309 if (!syn || dest->family != AF_TIPC) 1310 return -EDESTADDRREQ; 1311 } 1312 1313 if (unlikely(syn)) { 1314 if (sk->sk_state == TIPC_LISTEN) 1315 
return -EPIPE; 1316 if (sk->sk_state != TIPC_OPEN) 1317 return -EISCONN; 1318 if (tsk->published) 1319 return -EOPNOTSUPP; 1320 if (dest->addrtype == TIPC_ADDR_NAME) { 1321 tsk->conn_type = dest->addr.name.name.type; 1322 tsk->conn_instance = dest->addr.name.name.instance; 1323 } 1324 } 1325 1326 seq = &dest->addr.nameseq; 1327 if (dest->addrtype == TIPC_ADDR_MCAST) 1328 return tipc_sendmcast(sock, seq, m, dlen, timeout); 1329 1330 if (dest->addrtype == TIPC_ADDR_NAME) { 1331 type = dest->addr.name.name.type; 1332 inst = dest->addr.name.name.instance; 1333 dnode = dest->addr.name.domain; 1334 msg_set_type(hdr, TIPC_NAMED_MSG); 1335 msg_set_hdr_sz(hdr, NAMED_H_SIZE); 1336 msg_set_nametype(hdr, type); 1337 msg_set_nameinst(hdr, inst); 1338 msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); 1339 dport = tipc_nametbl_translate(net, type, inst, &dnode); 1340 msg_set_destnode(hdr, dnode); 1341 msg_set_destport(hdr, dport); 1342 if (unlikely(!dport && !dnode)) 1343 return -EHOSTUNREACH; 1344 } else if (dest->addrtype == TIPC_ADDR_ID) { 1345 dnode = dest->addr.id.node; 1346 msg_set_type(hdr, TIPC_DIRECT_MSG); 1347 msg_set_lookup_scope(hdr, 0); 1348 msg_set_destnode(hdr, dnode); 1349 msg_set_destport(hdr, dest->addr.id.ref); 1350 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1351 } else { 1352 return -EINVAL; 1353 } 1354 1355 /* Block or return if destination link is congested */ 1356 rc = tipc_wait_for_cond(sock, &timeout, 1357 !tipc_dest_find(clinks, dnode, 0)); 1358 if (unlikely(rc)) 1359 return rc; 1360 1361 skb_queue_head_init(&pkts); 1362 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1363 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1364 if (unlikely(rc != dlen)) 1365 return rc; 1366 1367 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1368 if (unlikely(rc == -ELINKCONG)) { 1369 tipc_dest_push(clinks, dnode, 0); 1370 tsk->cong_link_cnt++; 1371 rc = 0; 1372 } 1373 1374 if (unlikely(syn && !rc)) 1375 tipc_set_sk_state(sk, TIPC_CONNECTING); 1376 1377 return rc ? rc : dlen; 1378 } 1379 1380 /** 1381 * tipc_sendstream - send stream-oriented data 1382 * @sock: socket structure 1383 * @m: data to send 1384 * @dsz: total length of data to be transmitted 1385 * 1386 * Used for SOCK_STREAM data. 
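 *
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * because a stream send may succeed only partially, callers typically
 * loop on the returned byte count:
 *
 *	size_t off = 0;
 *	ssize_t n;
 *
 *	while (off < len) {
 *		n = send(sd, buf + off, len - off, 0);
 *		if (n < 0)
 *			break;		(inspect errno, e.g. EPIPE after shutdown)
 *		off += n;
 *	}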
1387 * 1388 * Returns the number of bytes sent on success (or partial success), 1389 * or errno if no data sent 1390 */ 1391 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) 1392 { 1393 struct sock *sk = sock->sk; 1394 int ret; 1395 1396 lock_sock(sk); 1397 ret = __tipc_sendstream(sock, m, dsz); 1398 release_sock(sk); 1399 1400 return ret; 1401 } 1402 1403 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) 1404 { 1405 struct sock *sk = sock->sk; 1406 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1407 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1408 struct tipc_sock *tsk = tipc_sk(sk); 1409 struct tipc_msg *hdr = &tsk->phdr; 1410 struct net *net = sock_net(sk); 1411 struct sk_buff_head pkts; 1412 u32 dnode = tsk_peer_node(tsk); 1413 int send, sent = 0; 1414 int rc = 0; 1415 1416 skb_queue_head_init(&pkts); 1417 1418 if (unlikely(dlen > INT_MAX)) 1419 return -EMSGSIZE; 1420 1421 /* Handle implicit connection setup */ 1422 if (unlikely(dest)) { 1423 rc = __tipc_sendmsg(sock, m, dlen); 1424 if (dlen && (dlen == rc)) 1425 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1426 return rc; 1427 } 1428 1429 do { 1430 rc = tipc_wait_for_cond(sock, &timeout, 1431 (!tsk->cong_link_cnt && 1432 !tsk_conn_cong(tsk) && 1433 tipc_sk_connected(sk))); 1434 if (unlikely(rc)) 1435 break; 1436 1437 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); 1438 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); 1439 if (unlikely(rc != send)) 1440 break; 1441 1442 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1443 if (unlikely(rc == -ELINKCONG)) { 1444 tsk->cong_link_cnt = 1; 1445 rc = 0; 1446 } 1447 if (likely(!rc)) { 1448 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); 1449 sent += send; 1450 } 1451 } while (sent < dlen && !rc); 1452 1453 return sent ? sent : rc; 1454 } 1455 1456 /** 1457 * tipc_send_packet - send a connection-oriented message 1458 * @sock: socket structure 1459 * @m: message to send 1460 * @dsz: length of data to be transmitted 1461 * 1462 * Used for SOCK_SEQPACKET messages. 
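 *
 * Illustrative only: on SOCK_SEQPACKET each send() carries one complete
 * message, and an oversized request fails before anything is sent:
 *
 *	if (send(sd, buf, len, 0) < 0 && errno == EMSGSIZE)
 *		report_too_big(len);	(len exceeded TIPC_MAX_USER_MSG_SIZE)
 *
 * report_too_big() is a hypothetical application helper.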
1463 * 1464 * Returns the number of bytes sent on success, or errno otherwise 1465 */ 1466 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1467 { 1468 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1469 return -EMSGSIZE; 1470 1471 return tipc_sendstream(sock, m, dsz); 1472 } 1473 1474 /* tipc_sk_finish_conn - complete the setup of a connection 1475 */ 1476 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1477 u32 peer_node) 1478 { 1479 struct sock *sk = &tsk->sk; 1480 struct net *net = sock_net(sk); 1481 struct tipc_msg *msg = &tsk->phdr; 1482 1483 msg_set_destnode(msg, peer_node); 1484 msg_set_destport(msg, peer_port); 1485 msg_set_type(msg, TIPC_CONN_MSG); 1486 msg_set_lookup_scope(msg, 0); 1487 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1488 1489 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 1490 tipc_set_sk_state(sk, TIPC_ESTABLISHED); 1491 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1492 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1493 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1494 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1495 return; 1496 1497 /* Fall back to message based flow control */ 1498 tsk->rcv_win = FLOWCTL_MSG_WIN; 1499 tsk->snd_win = FLOWCTL_MSG_WIN; 1500 } 1501 1502 /** 1503 * tipc_sk_set_orig_addr - capture sender's address for received message 1504 * @m: descriptor for message info 1505 * @hdr: received message header 1506 * 1507 * Note: Address is not captured if not requested by receiver. 1508 */ 1509 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) 1510 { 1511 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name); 1512 struct tipc_msg *hdr = buf_msg(skb); 1513 1514 if (!srcaddr) 1515 return; 1516 1517 srcaddr->sock.family = AF_TIPC; 1518 srcaddr->sock.addrtype = TIPC_ADDR_ID; 1519 srcaddr->sock.addr.id.ref = msg_origport(hdr); 1520 srcaddr->sock.addr.id.node = msg_orignode(hdr); 1521 srcaddr->sock.addr.name.domain = 0; 1522 srcaddr->sock.scope = 0; 1523 m->msg_namelen = sizeof(struct sockaddr_tipc); 1524 1525 if (!msg_in_group(hdr)) 1526 return; 1527 1528 /* Group message users may also want to know sending member's id */ 1529 srcaddr->member.family = AF_TIPC; 1530 srcaddr->member.addrtype = TIPC_ADDR_NAME; 1531 srcaddr->member.addr.name.name.type = msg_nametype(hdr); 1532 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; 1533 srcaddr->member.addr.name.domain = 0; 1534 m->msg_namelen = sizeof(*srcaddr); 1535 } 1536 1537 /** 1538 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1539 * @m: descriptor for message info 1540 * @msg: received message header 1541 * @tsk: TIPC port associated with message 1542 * 1543 * Note: Ancillary data is not captured if not requested by receiver. 1544 * 1545 * Returns 0 if successful, otherwise errno 1546 */ 1547 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1548 struct tipc_sock *tsk) 1549 { 1550 u32 anc_data[3]; 1551 u32 err; 1552 u32 dest_type; 1553 int has_name; 1554 int res; 1555 1556 if (likely(m->msg_controllen == 0)) 1557 return 0; 1558 1559 /* Optionally capture errored message object(s) */ 1560 err = msg ? 
msg_errcode(msg) : 0; 1561 if (unlikely(err)) { 1562 anc_data[0] = err; 1563 anc_data[1] = msg_data_sz(msg); 1564 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); 1565 if (res) 1566 return res; 1567 if (anc_data[1]) { 1568 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1569 msg_data(msg)); 1570 if (res) 1571 return res; 1572 } 1573 } 1574 1575 /* Optionally capture message destination object */ 1576 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 1577 switch (dest_type) { 1578 case TIPC_NAMED_MSG: 1579 has_name = 1; 1580 anc_data[0] = msg_nametype(msg); 1581 anc_data[1] = msg_namelower(msg); 1582 anc_data[2] = msg_namelower(msg); 1583 break; 1584 case TIPC_MCAST_MSG: 1585 has_name = 1; 1586 anc_data[0] = msg_nametype(msg); 1587 anc_data[1] = msg_namelower(msg); 1588 anc_data[2] = msg_nameupper(msg); 1589 break; 1590 case TIPC_CONN_MSG: 1591 has_name = (tsk->conn_type != 0); 1592 anc_data[0] = tsk->conn_type; 1593 anc_data[1] = tsk->conn_instance; 1594 anc_data[2] = tsk->conn_instance; 1595 break; 1596 default: 1597 has_name = 0; 1598 } 1599 if (has_name) { 1600 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); 1601 if (res) 1602 return res; 1603 } 1604 1605 return 0; 1606 } 1607 1608 static void tipc_sk_send_ack(struct tipc_sock *tsk) 1609 { 1610 struct sock *sk = &tsk->sk; 1611 struct net *net = sock_net(sk); 1612 struct sk_buff *skb = NULL; 1613 struct tipc_msg *msg; 1614 u32 peer_port = tsk_peer_port(tsk); 1615 u32 dnode = tsk_peer_node(tsk); 1616 1617 if (!tipc_sk_connected(sk)) 1618 return; 1619 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, 1620 dnode, tsk_own_node(tsk), peer_port, 1621 tsk->portid, TIPC_OK); 1622 if (!skb) 1623 return; 1624 msg = buf_msg(skb); 1625 msg_set_conn_ack(msg, tsk->rcv_unacked); 1626 tsk->rcv_unacked = 0; 1627 1628 /* Adjust to and advertize the correct window limit */ 1629 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) { 1630 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf); 1631 msg_set_adv_win(msg, tsk->rcv_win); 1632 } 1633 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1634 } 1635 1636 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1637 { 1638 struct sock *sk = sock->sk; 1639 DEFINE_WAIT(wait); 1640 long timeo = *timeop; 1641 int err = sock_error(sk); 1642 1643 if (err) 1644 return err; 1645 1646 for (;;) { 1647 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1648 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1649 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1650 err = -ENOTCONN; 1651 break; 1652 } 1653 release_sock(sk); 1654 timeo = schedule_timeout(timeo); 1655 lock_sock(sk); 1656 } 1657 err = 0; 1658 if (!skb_queue_empty(&sk->sk_receive_queue)) 1659 break; 1660 err = -EAGAIN; 1661 if (!timeo) 1662 break; 1663 err = sock_intr_errno(timeo); 1664 if (signal_pending(current)) 1665 break; 1666 1667 err = sock_error(sk); 1668 if (err) 1669 break; 1670 } 1671 finish_wait(sk_sleep(sk), &wait); 1672 *timeop = timeo; 1673 return err; 1674 } 1675 1676 /** 1677 * tipc_recvmsg - receive packet-oriented message 1678 * @m: descriptor for message info 1679 * @buflen: length of user buffer area 1680 * @flags: receive flags 1681 * 1682 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 1683 * If the complete message doesn't fit in user area, truncate it. 
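 *
 * A hedged user-space sketch of a typical call, showing how truncation
 * and the sender's address are observed; 'sd' is assumed to be a TIPC
 * SOCK_RDM or SOCK_SEQPACKET descriptor, warn_truncated() a hypothetical
 * application helper, and error handling is omitted:
 *
 *	char buf[1024];
 *	struct sockaddr_tipc src;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = {
 *		.msg_name = &src, .msg_namelen = sizeof(src),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	ssize_t n = recvmsg(sd, &m, 0);
 *
 *	if (n >= 0 && (m.msg_flags & MSG_TRUNC))
 *		warn_truncated();	(message was larger than buf and was cut)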
1684 * 1685 * Returns size of returned message data, errno otherwise 1686 */ 1687 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, 1688 size_t buflen, int flags) 1689 { 1690 struct sock *sk = sock->sk; 1691 bool connected = !tipc_sk_type_connectionless(sk); 1692 struct tipc_sock *tsk = tipc_sk(sk); 1693 int rc, err, hlen, dlen, copy; 1694 struct sk_buff_head xmitq; 1695 struct tipc_msg *hdr; 1696 struct sk_buff *skb; 1697 bool grp_evt; 1698 long timeout; 1699 1700 /* Catch invalid receive requests */ 1701 if (unlikely(!buflen)) 1702 return -EINVAL; 1703 1704 lock_sock(sk); 1705 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) { 1706 rc = -ENOTCONN; 1707 goto exit; 1708 } 1709 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1710 1711 /* Step rcv queue to first msg with data or error; wait if necessary */ 1712 do { 1713 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1714 if (unlikely(rc)) 1715 goto exit; 1716 skb = skb_peek(&sk->sk_receive_queue); 1717 hdr = buf_msg(skb); 1718 dlen = msg_data_sz(hdr); 1719 hlen = msg_hdr_sz(hdr); 1720 err = msg_errcode(hdr); 1721 grp_evt = msg_is_grp_evt(hdr); 1722 if (likely(dlen || err)) 1723 break; 1724 tsk_advance_rx_queue(sk); 1725 } while (1); 1726 1727 /* Collect msg meta data, including error code and rejected data */ 1728 tipc_sk_set_orig_addr(m, skb); 1729 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1730 if (unlikely(rc)) 1731 goto exit; 1732 1733 /* Capture data if non-error msg, otherwise just set return value */ 1734 if (likely(!err)) { 1735 copy = min_t(int, dlen, buflen); 1736 if (unlikely(copy != dlen)) 1737 m->msg_flags |= MSG_TRUNC; 1738 rc = skb_copy_datagram_msg(skb, hlen, m, copy); 1739 } else { 1740 copy = 0; 1741 rc = 0; 1742 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) 1743 rc = -ECONNRESET; 1744 } 1745 if (unlikely(rc)) 1746 goto exit; 1747 1748 /* Mark message as group event if applicable */ 1749 if (unlikely(grp_evt)) { 1750 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN) 1751 m->msg_flags |= MSG_EOR; 1752 m->msg_flags |= MSG_OOB; 1753 copy = 0; 1754 } 1755 1756 /* Caption of data or error code/rejected data was successful */ 1757 if (unlikely(flags & MSG_PEEK)) 1758 goto exit; 1759 1760 /* Send group flow control advertisement when applicable */ 1761 if (tsk->group && msg_in_group(hdr) && !grp_evt) { 1762 skb_queue_head_init(&xmitq); 1763 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen), 1764 msg_orignode(hdr), msg_origport(hdr), 1765 &xmitq); 1766 tipc_node_distr_xmit(sock_net(sk), &xmitq); 1767 } 1768 1769 tsk_advance_rx_queue(sk); 1770 1771 if (likely(!connected)) 1772 goto exit; 1773 1774 /* Send connection flow control advertisement when applicable */ 1775 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1776 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) 1777 tipc_sk_send_ack(tsk); 1778 exit: 1779 release_sock(sk); 1780 return rc ? rc : copy; 1781 } 1782 1783 /** 1784 * tipc_recvstream - receive stream-oriented data 1785 * @m: descriptor for message info 1786 * @buflen: total size of user buffer area 1787 * @flags: receive flags 1788 * 1789 * Used for SOCK_STREAM messages only. If not enough data is available 1790 * will optionally wait for more; never truncates data. 
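 *
 * Illustrative user-space counterpart (assumes 'sd' is a connected
 * SOCK_STREAM TIPC socket): MSG_WAITALL raises the low-water mark, so the
 * call returns only after the full amount, an error, or shutdown:
 *
 *	ssize_t n = recv(sd, &hdr, sizeof(hdr), MSG_WAITALL);
 *
 *	if (n == (ssize_t)sizeof(hdr))
 *		process_header(&hdr);	(hypothetical application helper)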
1791 * 1792 * Returns size of returned message data, errno otherwise 1793 */ 1794 static int tipc_recvstream(struct socket *sock, struct msghdr *m, 1795 size_t buflen, int flags) 1796 { 1797 struct sock *sk = sock->sk; 1798 struct tipc_sock *tsk = tipc_sk(sk); 1799 struct sk_buff *skb; 1800 struct tipc_msg *hdr; 1801 struct tipc_skb_cb *skb_cb; 1802 bool peek = flags & MSG_PEEK; 1803 int offset, required, copy, copied = 0; 1804 int hlen, dlen, err, rc; 1805 long timeout; 1806 1807 /* Catch invalid receive attempts */ 1808 if (unlikely(!buflen)) 1809 return -EINVAL; 1810 1811 lock_sock(sk); 1812 1813 if (unlikely(sk->sk_state == TIPC_OPEN)) { 1814 rc = -ENOTCONN; 1815 goto exit; 1816 } 1817 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); 1818 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1819 1820 do { 1821 /* Look at first msg in receive queue; wait if necessary */ 1822 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1823 if (unlikely(rc)) 1824 break; 1825 skb = skb_peek(&sk->sk_receive_queue); 1826 skb_cb = TIPC_SKB_CB(skb); 1827 hdr = buf_msg(skb); 1828 dlen = msg_data_sz(hdr); 1829 hlen = msg_hdr_sz(hdr); 1830 err = msg_errcode(hdr); 1831 1832 /* Discard any empty non-errored (SYN-) message */ 1833 if (unlikely(!dlen && !err)) { 1834 tsk_advance_rx_queue(sk); 1835 continue; 1836 } 1837 1838 /* Collect msg meta data, incl. error code and rejected data */ 1839 if (!copied) { 1840 tipc_sk_set_orig_addr(m, skb); 1841 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1842 if (rc) 1843 break; 1844 } 1845 1846 /* Copy data if msg ok, otherwise return error/partial data */ 1847 if (likely(!err)) { 1848 offset = skb_cb->bytes_read; 1849 copy = min_t(int, dlen - offset, buflen - copied); 1850 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); 1851 if (unlikely(rc)) 1852 break; 1853 copied += copy; 1854 offset += copy; 1855 if (unlikely(offset < dlen)) { 1856 if (!peek) 1857 skb_cb->bytes_read = offset; 1858 break; 1859 } 1860 } else { 1861 rc = 0; 1862 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) 1863 rc = -ECONNRESET; 1864 if (copied || rc) 1865 break; 1866 } 1867 1868 if (unlikely(peek)) 1869 break; 1870 1871 tsk_advance_rx_queue(sk); 1872 1873 /* Send connection flow control advertisement when applicable */ 1874 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1875 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) 1876 tipc_sk_send_ack(tsk); 1877 1878 /* Exit if all requested data or FIN/error received */ 1879 if (copied == buflen || err) 1880 break; 1881 1882 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); 1883 exit: 1884 release_sock(sk); 1885 return copied ? 
copied : rc; 1886 } 1887 1888 /** 1889 * tipc_write_space - wake up thread if port congestion is released 1890 * @sk: socket 1891 */ 1892 static void tipc_write_space(struct sock *sk) 1893 { 1894 struct socket_wq *wq; 1895 1896 rcu_read_lock(); 1897 wq = rcu_dereference(sk->sk_wq); 1898 if (skwq_has_sleeper(wq)) 1899 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1900 EPOLLWRNORM | EPOLLWRBAND); 1901 rcu_read_unlock(); 1902 } 1903 1904 /** 1905 * tipc_data_ready - wake up threads to indicate messages have been received 1906 * @sk: socket 1908 */ 1909 static void tipc_data_ready(struct sock *sk) 1910 { 1911 struct socket_wq *wq; 1912 1913 rcu_read_lock(); 1914 wq = rcu_dereference(sk->sk_wq); 1915 if (skwq_has_sleeper(wq)) 1916 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1917 EPOLLRDNORM | EPOLLRDBAND); 1918 rcu_read_unlock(); 1919 } 1920 1921 static void tipc_sock_destruct(struct sock *sk) 1922 { 1923 __skb_queue_purge(&sk->sk_receive_queue); 1924 } 1925 1926 static void tipc_sk_proto_rcv(struct sock *sk, 1927 struct sk_buff_head *inputq, 1928 struct sk_buff_head *xmitq) 1929 { 1930 struct sk_buff *skb = __skb_dequeue(inputq); 1931 struct tipc_sock *tsk = tipc_sk(sk); 1932 struct tipc_msg *hdr = buf_msg(skb); 1933 struct tipc_group *grp = tsk->group; 1934 bool wakeup = false; 1935 1936 switch (msg_user(hdr)) { 1937 case CONN_MANAGER: 1938 tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1939 return; 1940 case SOCK_WAKEUP: 1941 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1942 tsk->cong_link_cnt--; 1943 wakeup = true; 1944 break; 1945 case GROUP_PROTOCOL: 1946 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1947 break; 1948 case TOP_SRV: 1949 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1950 hdr, inputq, xmitq); 1951 break; 1952 default: 1953 break; 1954 } 1955 1956 if (wakeup) 1957 sk->sk_write_space(sk); 1958 1959 kfree_skb(skb); 1960 } 1961 1962 /** 1963 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket 1964 * @tsk: TIPC socket 1965 * @skb: pointer to message buffer.
Set to NULL if buffer is consumed 1966 * 1967 * Returns true if everything ok, false otherwise 1968 */ 1969 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1970 { 1971 struct sock *sk = &tsk->sk; 1972 struct net *net = sock_net(sk); 1973 struct tipc_msg *hdr = buf_msg(skb); 1974 u32 pport = msg_origport(hdr); 1975 u32 pnode = msg_orignode(hdr); 1976 1977 if (unlikely(msg_mcast(hdr))) 1978 return false; 1979 1980 switch (sk->sk_state) { 1981 case TIPC_CONNECTING: 1982 /* Accept only ACK or NACK message */ 1983 if (unlikely(!msg_connected(hdr))) { 1984 if (pport != tsk_peer_port(tsk) || 1985 pnode != tsk_peer_node(tsk)) 1986 return false; 1987 1988 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1989 sk->sk_err = ECONNREFUSED; 1990 sk->sk_state_change(sk); 1991 return true; 1992 } 1993 1994 if (unlikely(msg_errcode(hdr))) { 1995 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1996 sk->sk_err = ECONNREFUSED; 1997 sk->sk_state_change(sk); 1998 return true; 1999 } 2000 2001 if (unlikely(!msg_isdata(hdr))) { 2002 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2003 sk->sk_err = EINVAL; 2004 sk->sk_state_change(sk); 2005 return true; 2006 } 2007 2008 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2009 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2010 2011 /* If 'ACK+' message, add to socket receive queue */ 2012 if (msg_data_sz(hdr)) 2013 return true; 2014 2015 /* If empty 'ACK-' message, wake up sleeping connect() */ 2016 sk->sk_data_ready(sk); 2017 2018 /* 'ACK-' message is neither accepted nor rejected: */ 2019 msg_set_dest_droppable(hdr, 1); 2020 return false; 2021 2022 case TIPC_OPEN: 2023 case TIPC_DISCONNECTING: 2024 break; 2025 case TIPC_LISTEN: 2026 /* Accept only SYN message */ 2027 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2028 return true; 2029 break; 2030 case TIPC_ESTABLISHED: 2031 /* Accept only connection-based messages sent by peer */ 2032 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2033 return false; 2034 2035 if (unlikely(msg_errcode(hdr))) { 2036 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2037 /* Let timer expire on its own */ 2038 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2039 tsk->portid); 2040 sk->sk_state_change(sk); 2041 } 2042 return true; 2043 default: 2044 pr_err("Unknown sk_state %u\n", sk->sk_state); 2045 } 2046 2047 return false; 2048 } 2049 2050 /** 2051 * rcvbuf_limit - get proper overload limit of socket receive queue 2052 * @sk: socket 2053 * @skb: message 2054 * 2055 * For connection-oriented messages, irrespective of importance, 2056 * default queue limit is 2 MB. 2057 * 2058 * For connectionless messages, queue limits are based on message 2059 * importance as follows: 2060 * 2061 * TIPC_LOW_IMPORTANCE (2 MB) 2062 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2063 * TIPC_HIGH_IMPORTANCE (8 MB) 2064 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2065 * 2066 * Returns overload limit according to corresponding message importance 2067 */ 2068 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2069 { 2070 struct tipc_sock *tsk = tipc_sk(sk); 2071 struct tipc_msg *hdr = buf_msg(skb); 2072 2073 if (unlikely(msg_in_group(hdr))) 2074 return sk->sk_rcvbuf; 2075 2076 if (unlikely(!msg_connected(hdr))) 2077 return sk->sk_rcvbuf << msg_importance(hdr); 2078 2079 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2080 return sk->sk_rcvbuf; 2081 2082 return FLOWCTL_MSG_LIM; 2083 } 2084 2085 /** 2086 * tipc_sk_filter_rcv - validate incoming message 2087 * @sk: socket 2088 * @skb: pointer to message.
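 * @xmitq: output queue for messages to be sent onwards by the caller
 *         (e.g. rejected messages and protocol/group responses)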
2089 * 2090 * Enqueues message on receive queue if acceptable; optionally handles 2091 * disconnect indication for a connected socket. 2092 * 2093 * Called with socket lock already taken 2094 * 2095 */ 2096 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2097 struct sk_buff_head *xmitq) 2098 { 2099 bool sk_conn = !tipc_sk_type_connectionless(sk); 2100 struct tipc_sock *tsk = tipc_sk(sk); 2101 struct tipc_group *grp = tsk->group; 2102 struct tipc_msg *hdr = buf_msg(skb); 2103 struct net *net = sock_net(sk); 2104 struct sk_buff_head inputq; 2105 int limit, err = TIPC_OK; 2106 2107 TIPC_SKB_CB(skb)->bytes_read = 0; 2108 __skb_queue_head_init(&inputq); 2109 __skb_queue_tail(&inputq, skb); 2110 2111 if (unlikely(!msg_isdata(hdr))) 2112 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2113 2114 if (unlikely(grp)) 2115 tipc_group_filter_msg(grp, &inputq, xmitq); 2116 2117 /* Validate and add to receive buffer if there is space */ 2118 while ((skb = __skb_dequeue(&inputq))) { 2119 hdr = buf_msg(skb); 2120 limit = rcvbuf_limit(sk, skb); 2121 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2122 (!sk_conn && msg_connected(hdr)) || 2123 (!grp && msg_in_group(hdr))) 2124 err = TIPC_ERR_NO_PORT; 2125 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2126 atomic_inc(&sk->sk_drops); 2127 err = TIPC_ERR_OVERLOAD; 2128 } 2129 2130 if (unlikely(err)) { 2131 tipc_skb_reject(net, err, skb, xmitq); 2132 err = TIPC_OK; 2133 continue; 2134 } 2135 __skb_queue_tail(&sk->sk_receive_queue, skb); 2136 skb_set_owner_r(skb, sk); 2137 sk->sk_data_ready(sk); 2138 } 2139 } 2140 2141 /** 2142 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2143 * @sk: socket 2144 * @skb: message 2145 * 2146 * Caller must hold socket lock 2147 */ 2148 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2149 { 2150 unsigned int before = sk_rmem_alloc_get(sk); 2151 struct sk_buff_head xmitq; 2152 unsigned int added; 2153 2154 __skb_queue_head_init(&xmitq); 2155 2156 tipc_sk_filter_rcv(sk, skb, &xmitq); 2157 added = sk_rmem_alloc_get(sk) - before; 2158 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2159 2160 /* Send pending response/rejected messages, if any */ 2161 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2162 return 0; 2163 } 2164 2165 /** 2166 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2167 * inputq and try adding them to socket or backlog queue 2168 * @inputq: list of incoming buffers with potentially different destinations 2169 * @sk: socket where the buffers should be enqueued 2170 * @dport: port number for the socket 2171 * 2172 * Caller must hold socket lock 2173 */ 2174 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2175 u32 dport, struct sk_buff_head *xmitq) 2176 { 2177 unsigned long time_limit = jiffies + 2; 2178 struct sk_buff *skb; 2179 unsigned int lim; 2180 atomic_t *dcnt; 2181 u32 onode; 2182 2183 while (skb_queue_len(inputq)) { 2184 if (unlikely(time_after_eq(jiffies, time_limit))) 2185 return; 2186 2187 skb = tipc_skb_dequeue(inputq, dport); 2188 if (unlikely(!skb)) 2189 return; 2190 2191 /* Add message directly to receive queue if possible */ 2192 if (!sock_owned_by_user(sk)) { 2193 tipc_sk_filter_rcv(sk, skb, xmitq); 2194 continue; 2195 } 2196 2197 /* Try backlog, compensating for double-counted bytes */ 2198 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2199 if (!sk->sk_backlog.len) 2200 atomic_set(dcnt, 0); 2201 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2202 if (likely(!sk_add_backlog(sk, skb, lim))) 2203 
continue; 2204 2205 /* Overload => reject message back to sender */ 2206 onode = tipc_own_addr(sock_net(sk)); 2207 atomic_inc(&sk->sk_drops); 2208 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2209 __skb_queue_tail(xmitq, skb); 2210 break; 2211 } 2212 } 2213 2214 /** 2215 * tipc_sk_rcv - handle a chain of incoming buffers 2216 * @inputq: buffer list containing the buffers 2217 * Consumes all buffers in list until inputq is empty 2218 * Note: may be called in multiple threads referring to the same queue 2219 */ 2220 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2221 { 2222 struct sk_buff_head xmitq; 2223 u32 dnode, dport = 0; 2224 int err; 2225 struct tipc_sock *tsk; 2226 struct sock *sk; 2227 struct sk_buff *skb; 2228 2229 __skb_queue_head_init(&xmitq); 2230 while (skb_queue_len(inputq)) { 2231 dport = tipc_skb_peek_port(inputq, dport); 2232 tsk = tipc_sk_lookup(net, dport); 2233 2234 if (likely(tsk)) { 2235 sk = &tsk->sk; 2236 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2237 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2238 spin_unlock_bh(&sk->sk_lock.slock); 2239 } 2240 /* Send pending response/rejected messages, if any */ 2241 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2242 sock_put(sk); 2243 continue; 2244 } 2245 /* No destination socket => dequeue skb if still there */ 2246 skb = tipc_skb_dequeue(inputq, dport); 2247 if (!skb) 2248 return; 2249 2250 /* Try secondary lookup if unresolved named message */ 2251 err = TIPC_ERR_NO_PORT; 2252 if (tipc_msg_lookup_dest(net, skb, &err)) 2253 goto xmit; 2254 2255 /* Prepare for message rejection */ 2256 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2257 continue; 2258 xmit: 2259 dnode = msg_destnode(buf_msg(skb)); 2260 tipc_node_xmit_skb(net, skb, dnode, dport); 2261 } 2262 } 2263 2264 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2265 { 2266 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2267 struct sock *sk = sock->sk; 2268 int done; 2269 2270 do { 2271 int err = sock_error(sk); 2272 if (err) 2273 return err; 2274 if (!*timeo_p) 2275 return -ETIMEDOUT; 2276 if (signal_pending(current)) 2277 return sock_intr_errno(*timeo_p); 2278 2279 add_wait_queue(sk_sleep(sk), &wait); 2280 done = sk_wait_event(sk, timeo_p, 2281 sk->sk_state != TIPC_CONNECTING, &wait); 2282 remove_wait_queue(sk_sleep(sk), &wait); 2283 } while (!done); 2284 return 0; 2285 } 2286 2287 /** 2288 * tipc_connect - establish a connection to another TIPC port 2289 * @sock: socket structure 2290 * @dest: socket address for destination port 2291 * @destlen: size of socket address data structure 2292 * @flags: file-related flags associated with socket 2293 * 2294 * Returns 0 on success, errno otherwise 2295 */ 2296 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2297 int destlen, int flags) 2298 { 2299 struct sock *sk = sock->sk; 2300 struct tipc_sock *tsk = tipc_sk(sk); 2301 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2302 struct msghdr m = {NULL,}; 2303 long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; 2304 int previous; 2305 int res = 0; 2306 2307 if (destlen != sizeof(struct sockaddr_tipc)) 2308 return -EINVAL; 2309 2310 lock_sock(sk); 2311 2312 if (tsk->group) { 2313 res = -EINVAL; 2314 goto exit; 2315 } 2316 2317 if (dst->family == AF_UNSPEC) { 2318 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2319 if (!tipc_sk_type_connectionless(sk)) 2320 res = -EINVAL; 2321 goto exit; 2322 } else if (dst->family != AF_TIPC) { 2323 res = -EINVAL; 2324 } 2325 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2326 res = -EINVAL; 2327 if (res) 2328 goto exit; 2329 2330 /* DGRAM/RDM connect(), just save the destaddr */ 2331 if (tipc_sk_type_connectionless(sk)) { 2332 memcpy(&tsk->peer, dest, destlen); 2333 goto exit; 2334 } 2335 2336 previous = sk->sk_state; 2337 2338 switch (sk->sk_state) { 2339 case TIPC_OPEN: 2340 /* Send a 'SYN-' to destination */ 2341 m.msg_name = dest; 2342 m.msg_namelen = destlen; 2343 2344 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2345 * indicate send_msg() is never blocked. 2346 */ 2347 if (!timeout) 2348 m.msg_flags = MSG_DONTWAIT; 2349 2350 res = __tipc_sendmsg(sock, &m, 0); 2351 if ((res < 0) && (res != -EWOULDBLOCK)) 2352 goto exit; 2353 2354 /* Just entered TIPC_CONNECTING state; the only 2355 * difference is that return value in non-blocking 2356 * case is EINPROGRESS, rather than EALREADY. 2357 */ 2358 res = -EINPROGRESS; 2359 /* fall thru' */ 2360 case TIPC_CONNECTING: 2361 if (!timeout) { 2362 if (previous == TIPC_CONNECTING) 2363 res = -EALREADY; 2364 goto exit; 2365 } 2366 timeout = msecs_to_jiffies(timeout); 2367 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2368 res = tipc_wait_for_connect(sock, &timeout); 2369 break; 2370 case TIPC_ESTABLISHED: 2371 res = -EISCONN; 2372 break; 2373 default: 2374 res = -EINVAL; 2375 } 2376 2377 exit: 2378 release_sock(sk); 2379 return res; 2380 } 2381 2382 /** 2383 * tipc_listen - allow socket to listen for incoming connections 2384 * @sock: socket structure 2385 * @len: (unused) 2386 * 2387 * Returns 0 on success, errno otherwise 2388 */ 2389 static int tipc_listen(struct socket *sock, int len) 2390 { 2391 struct sock *sk = sock->sk; 2392 int res; 2393 2394 lock_sock(sk); 2395 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2396 release_sock(sk); 2397 2398 return res; 2399 } 2400 2401 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2402 { 2403 struct sock *sk = sock->sk; 2404 DEFINE_WAIT(wait); 2405 int err; 2406 2407 /* True wake-one mechanism for incoming connections: only 2408 * one process gets woken up, not the 'whole herd'. 2409 * Since we do not 'race & poll' for established sockets 2410 * anymore, the common case will execute the loop only once. 
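 *
 * For context, a minimal (hypothetical) userspace pattern that ends up
 * waiting in this loop - the descriptor names are illustrative only:
 *
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	// ... bind() the socket to a service name ...
 *	listen(sd, 0);
 *	int peer = accept(sd, NULL, NULL);	// may block here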
2411 */ 2412 for (;;) { 2413 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2414 TASK_INTERRUPTIBLE); 2415 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2416 release_sock(sk); 2417 timeo = schedule_timeout(timeo); 2418 lock_sock(sk); 2419 } 2420 err = 0; 2421 if (!skb_queue_empty(&sk->sk_receive_queue)) 2422 break; 2423 err = -EAGAIN; 2424 if (!timeo) 2425 break; 2426 err = sock_intr_errno(timeo); 2427 if (signal_pending(current)) 2428 break; 2429 } 2430 finish_wait(sk_sleep(sk), &wait); 2431 return err; 2432 } 2433 2434 /** 2435 * tipc_accept - wait for connection request 2436 * @sock: listening socket 2437 * @new_sock: new socket that is to be connected 2438 * @flags: file-related flags associated with socket 2439 * 2440 * Returns 0 on success, errno otherwise 2441 */ 2442 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2443 bool kern) 2444 { 2445 struct sock *new_sk, *sk = sock->sk; 2446 struct sk_buff *buf; 2447 struct tipc_sock *new_tsock; 2448 struct tipc_msg *msg; 2449 long timeo; 2450 int res; 2451 2452 lock_sock(sk); 2453 2454 if (sk->sk_state != TIPC_LISTEN) { 2455 res = -EINVAL; 2456 goto exit; 2457 } 2458 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2459 res = tipc_wait_for_accept(sock, timeo); 2460 if (res) 2461 goto exit; 2462 2463 buf = skb_peek(&sk->sk_receive_queue); 2464 2465 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2466 if (res) 2467 goto exit; 2468 security_sk_clone(sock->sk, new_sock->sk); 2469 2470 new_sk = new_sock->sk; 2471 new_tsock = tipc_sk(new_sk); 2472 msg = buf_msg(buf); 2473 2474 /* we lock on new_sk; but lockdep sees the lock on sk */ 2475 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2476 2477 /* 2478 * Reject any stray messages received by new socket 2479 * before the socket lock was taken (very, very unlikely) 2480 */ 2481 tsk_rej_rx_queue(new_sk); 2482 2483 /* Connect new socket to its peer */ 2484 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2485 2486 tsk_set_importance(new_tsock, msg_importance(msg)); 2487 if (msg_named(msg)) { 2488 new_tsock->conn_type = msg_nametype(msg); 2489 new_tsock->conn_instance = msg_nameinst(msg); 2490 } 2491 2492 /* 2493 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 2494 * Respond to 'SYN+' by queuing it on new socket. 2495 */ 2496 if (!msg_data_sz(msg)) { 2497 struct msghdr m = {NULL,}; 2498 2499 tsk_advance_rx_queue(sk); 2500 __tipc_sendstream(new_sock, &m, 0); 2501 } else { 2502 __skb_dequeue(&sk->sk_receive_queue); 2503 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2504 skb_set_owner_r(buf, new_sk); 2505 } 2506 release_sock(new_sk); 2507 exit: 2508 release_sock(sk); 2509 return res; 2510 } 2511 2512 /** 2513 * tipc_shutdown - shutdown socket connection 2514 * @sock: socket structure 2515 * @how: direction to close (must be SHUT_RDWR) 2516 * 2517 * Terminates connection (if necessary), then purges socket's receive queue.
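 *
 * A sketch of the expected userspace call (descriptor name assumed):
 *
 *	shutdown(sd, SHUT_RDWR);
 *
 * Any other 'how' value (SHUT_RD or SHUT_WR) is rejected with -EINVAL,
 * as checked at the top of the function.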
2518 * 2519 * Returns 0 on success, errno otherwise 2520 */ 2521 static int tipc_shutdown(struct socket *sock, int how) 2522 { 2523 struct sock *sk = sock->sk; 2524 int res; 2525 2526 if (how != SHUT_RDWR) 2527 return -EINVAL; 2528 2529 lock_sock(sk); 2530 2531 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2532 sk->sk_shutdown = SEND_SHUTDOWN; 2533 2534 if (sk->sk_state == TIPC_DISCONNECTING) { 2535 /* Discard any unreceived messages */ 2536 __skb_queue_purge(&sk->sk_receive_queue); 2537 2538 /* Wake up anyone sleeping in poll */ 2539 sk->sk_state_change(sk); 2540 res = 0; 2541 } else { 2542 res = -ENOTCONN; 2543 } 2544 2545 release_sock(sk); 2546 return res; 2547 } 2548 2549 static void tipc_sk_timeout(struct timer_list *t) 2550 { 2551 struct sock *sk = from_timer(sk, t, sk_timer); 2552 struct tipc_sock *tsk = tipc_sk(sk); 2553 u32 peer_port = tsk_peer_port(tsk); 2554 u32 peer_node = tsk_peer_node(tsk); 2555 u32 own_node = tsk_own_node(tsk); 2556 u32 own_port = tsk->portid; 2557 struct net *net = sock_net(sk); 2558 struct sk_buff *skb = NULL; 2559 2560 bh_lock_sock(sk); 2561 if (!tipc_sk_connected(sk)) 2562 goto exit; 2563 2564 /* Try again later if socket is busy */ 2565 if (sock_owned_by_user(sk)) { 2566 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2567 goto exit; 2568 } 2569 2570 if (tsk->probe_unacked) { 2571 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2572 tipc_node_remove_conn(net, peer_node, peer_port); 2573 sk->sk_state_change(sk); 2574 goto exit; 2575 } 2576 /* Send new probe */ 2577 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2578 peer_node, own_node, peer_port, own_port, 2579 TIPC_OK); 2580 tsk->probe_unacked = true; 2581 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2582 exit: 2583 bh_unlock_sock(sk); 2584 if (skb) 2585 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2586 sock_put(sk); 2587 } 2588 2589 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2590 struct tipc_name_seq const *seq) 2591 { 2592 struct sock *sk = &tsk->sk; 2593 struct net *net = sock_net(sk); 2594 struct publication *publ; 2595 u32 key; 2596 2597 if (scope != TIPC_NODE_SCOPE) 2598 scope = TIPC_CLUSTER_SCOPE; 2599 2600 if (tipc_sk_connected(sk)) 2601 return -EINVAL; 2602 key = tsk->portid + tsk->pub_count + 1; 2603 if (key == tsk->portid) 2604 return -EADDRINUSE; 2605 2606 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2607 scope, tsk->portid, key); 2608 if (unlikely(!publ)) 2609 return -EINVAL; 2610 2611 list_add(&publ->binding_sock, &tsk->publications); 2612 tsk->pub_count++; 2613 tsk->published = 1; 2614 return 0; 2615 } 2616 2617 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2618 struct tipc_name_seq const *seq) 2619 { 2620 struct net *net = sock_net(&tsk->sk); 2621 struct publication *publ; 2622 struct publication *safe; 2623 int rc = -EINVAL; 2624 2625 if (scope != TIPC_NODE_SCOPE) 2626 scope = TIPC_CLUSTER_SCOPE; 2627 2628 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2629 if (seq) { 2630 if (publ->scope != scope) 2631 continue; 2632 if (publ->type != seq->type) 2633 continue; 2634 if (publ->lower != seq->lower) 2635 continue; 2636 if (publ->upper != seq->upper) 2637 break; 2638 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2639 publ->upper, publ->key); 2640 rc = 0; 2641 break; 2642 } 2643 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2644 publ->upper, publ->key); 2645 rc = 0; 2646 } 2647 if (list_empty(&tsk->publications)) 2648 tsk->published = 0; 2649 return rc; 
2650 } 2651 2652 /* tipc_sk_reinit: set non-zero address in all existing sockets 2653 * when we go from standalone to network mode. 2654 */ 2655 void tipc_sk_reinit(struct net *net) 2656 { 2657 struct tipc_net *tn = net_generic(net, tipc_net_id); 2658 struct rhashtable_iter iter; 2659 struct tipc_sock *tsk; 2660 struct tipc_msg *msg; 2661 2662 rhashtable_walk_enter(&tn->sk_rht, &iter); 2663 2664 do { 2665 rhashtable_walk_start(&iter); 2666 2667 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2668 spin_lock_bh(&tsk->sk.sk_lock.slock); 2669 msg = &tsk->phdr; 2670 msg_set_prevnode(msg, tipc_own_addr(net)); 2671 msg_set_orignode(msg, tipc_own_addr(net)); 2672 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2673 } 2674 2675 rhashtable_walk_stop(&iter); 2676 } while (tsk == ERR_PTR(-EAGAIN)); 2677 } 2678 2679 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2680 { 2681 struct tipc_net *tn = net_generic(net, tipc_net_id); 2682 struct tipc_sock *tsk; 2683 2684 rcu_read_lock(); 2685 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2686 if (tsk) 2687 sock_hold(&tsk->sk); 2688 rcu_read_unlock(); 2689 2690 return tsk; 2691 } 2692 2693 static int tipc_sk_insert(struct tipc_sock *tsk) 2694 { 2695 struct sock *sk = &tsk->sk; 2696 struct net *net = sock_net(sk); 2697 struct tipc_net *tn = net_generic(net, tipc_net_id); 2698 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2699 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2700 2701 while (remaining--) { 2702 portid++; 2703 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2704 portid = TIPC_MIN_PORT; 2705 tsk->portid = portid; 2706 sock_hold(&tsk->sk); 2707 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2708 tsk_rht_params)) 2709 return 0; 2710 sock_put(&tsk->sk); 2711 } 2712 2713 return -1; 2714 } 2715 2716 static void tipc_sk_remove(struct tipc_sock *tsk) 2717 { 2718 struct sock *sk = &tsk->sk; 2719 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2720 2721 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2722 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2723 __sock_put(sk); 2724 } 2725 } 2726 2727 static const struct rhashtable_params tsk_rht_params = { 2728 .nelem_hint = 192, 2729 .head_offset = offsetof(struct tipc_sock, node), 2730 .key_offset = offsetof(struct tipc_sock, portid), 2731 .key_len = sizeof(u32), /* portid */ 2732 .max_size = 1048576, 2733 .min_size = 256, 2734 .automatic_shrinking = true, 2735 }; 2736 2737 int tipc_sk_rht_init(struct net *net) 2738 { 2739 struct tipc_net *tn = net_generic(net, tipc_net_id); 2740 2741 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2742 } 2743 2744 void tipc_sk_rht_destroy(struct net *net) 2745 { 2746 struct tipc_net *tn = net_generic(net, tipc_net_id); 2747 2748 /* Wait for socket readers to complete */ 2749 synchronize_net(); 2750 2751 rhashtable_destroy(&tn->sk_rht); 2752 } 2753 2754 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2755 { 2756 struct net *net = sock_net(&tsk->sk); 2757 struct tipc_group *grp = tsk->group; 2758 struct tipc_msg *hdr = &tsk->phdr; 2759 struct tipc_name_seq seq; 2760 int rc; 2761 2762 if (mreq->type < TIPC_RESERVED_TYPES) 2763 return -EACCES; 2764 if (mreq->scope > TIPC_NODE_SCOPE) 2765 return -EINVAL; 2766 if (grp) 2767 return -EACCES; 2768 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2769 if (!grp) 2770 return -ENOMEM; 2771 tsk->group = grp; 2772 msg_set_lookup_scope(hdr, mreq->scope); 2773 
msg_set_nametype(hdr, mreq->type); 2774 msg_set_dest_droppable(hdr, true); 2775 seq.type = mreq->type; 2776 seq.lower = mreq->instance; 2777 seq.upper = seq.lower; 2778 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2779 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2780 if (rc) { 2781 tipc_group_delete(net, grp); 2782 tsk->group = NULL; 2783 return rc; 2784 } 2785 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2786 tsk->mc_method.rcast = true; 2787 tsk->mc_method.mandatory = true; 2788 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2789 return rc; 2790 } 2791 2792 static int tipc_sk_leave(struct tipc_sock *tsk) 2793 { 2794 struct net *net = sock_net(&tsk->sk); 2795 struct tipc_group *grp = tsk->group; 2796 struct tipc_name_seq seq; 2797 int scope; 2798 2799 if (!grp) 2800 return -EINVAL; 2801 tipc_group_self(grp, &seq, &scope); 2802 tipc_group_delete(net, grp); 2803 tsk->group = NULL; 2804 tipc_sk_withdraw(tsk, scope, &seq); 2805 return 0; 2806 } 2807 2808 /** 2809 * tipc_setsockopt - set socket option 2810 * @sock: socket structure 2811 * @lvl: option level 2812 * @opt: option identifier 2813 * @ov: pointer to new option value 2814 * @ol: length of option value 2815 * 2816 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2817 * (to ease compatibility). 2818 * 2819 * Returns 0 on success, errno otherwise 2820 */ 2821 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2822 char __user *ov, unsigned int ol) 2823 { 2824 struct sock *sk = sock->sk; 2825 struct tipc_sock *tsk = tipc_sk(sk); 2826 struct tipc_group_req mreq; 2827 u32 value = 0; 2828 int res = 0; 2829 2830 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2831 return 0; 2832 if (lvl != SOL_TIPC) 2833 return -ENOPROTOOPT; 2834 2835 switch (opt) { 2836 case TIPC_IMPORTANCE: 2837 case TIPC_SRC_DROPPABLE: 2838 case TIPC_DEST_DROPPABLE: 2839 case TIPC_CONN_TIMEOUT: 2840 if (ol < sizeof(value)) 2841 return -EINVAL; 2842 if (get_user(value, (u32 __user *)ov)) 2843 return -EFAULT; 2844 break; 2845 case TIPC_GROUP_JOIN: 2846 if (ol < sizeof(mreq)) 2847 return -EINVAL; 2848 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2849 return -EFAULT; 2850 break; 2851 default: 2852 if (ov || ol) 2853 return -EINVAL; 2854 } 2855 2856 lock_sock(sk); 2857 2858 switch (opt) { 2859 case TIPC_IMPORTANCE: 2860 res = tsk_set_importance(tsk, value); 2861 break; 2862 case TIPC_SRC_DROPPABLE: 2863 if (sock->type != SOCK_STREAM) 2864 tsk_set_unreliable(tsk, value); 2865 else 2866 res = -ENOPROTOOPT; 2867 break; 2868 case TIPC_DEST_DROPPABLE: 2869 tsk_set_unreturnable(tsk, value); 2870 break; 2871 case TIPC_CONN_TIMEOUT: 2872 tipc_sk(sk)->conn_timeout = value; 2873 break; 2874 case TIPC_MCAST_BROADCAST: 2875 tsk->mc_method.rcast = false; 2876 tsk->mc_method.mandatory = true; 2877 break; 2878 case TIPC_MCAST_REPLICAST: 2879 tsk->mc_method.rcast = true; 2880 tsk->mc_method.mandatory = true; 2881 break; 2882 case TIPC_GROUP_JOIN: 2883 res = tipc_sk_join(tsk, &mreq); 2884 break; 2885 case TIPC_GROUP_LEAVE: 2886 res = tipc_sk_leave(tsk); 2887 break; 2888 default: 2889 res = -EINVAL; 2890 } 2891 2892 release_sock(sk); 2893 2894 return res; 2895 } 2896 2897 /** 2898 * tipc_getsockopt - get socket option 2899 * @sock: socket structure 2900 * @lvl: option level 2901 * @opt: option identifier 2902 * @ov: receptacle for option value 2903 * @ol: receptacle for length of option value 2904 * 2905 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2906 * (to ease compatibility). 
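 *
 * Illustrative userspace read of one option (hypothetical descriptor 'sd'):
 *
 *	__u32 timeout_ms;
 *	socklen_t olen = sizeof(timeout_ms);
 *	getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout_ms, &olen);
 *
 * The value is copied back only if the supplied length is at least
 * sizeof(u32), and *ol is updated to the returned length.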
2907 * 2908 * Returns 0 on success, errno otherwise 2909 */ 2910 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2911 char __user *ov, int __user *ol) 2912 { 2913 struct sock *sk = sock->sk; 2914 struct tipc_sock *tsk = tipc_sk(sk); 2915 struct tipc_name_seq seq; 2916 int len, scope; 2917 u32 value; 2918 int res; 2919 2920 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2921 return put_user(0, ol); 2922 if (lvl != SOL_TIPC) 2923 return -ENOPROTOOPT; 2924 res = get_user(len, ol); 2925 if (res) 2926 return res; 2927 2928 lock_sock(sk); 2929 2930 switch (opt) { 2931 case TIPC_IMPORTANCE: 2932 value = tsk_importance(tsk); 2933 break; 2934 case TIPC_SRC_DROPPABLE: 2935 value = tsk_unreliable(tsk); 2936 break; 2937 case TIPC_DEST_DROPPABLE: 2938 value = tsk_unreturnable(tsk); 2939 break; 2940 case TIPC_CONN_TIMEOUT: 2941 value = tsk->conn_timeout; 2942 /* no need to set "res", since already 0 at this point */ 2943 break; 2944 case TIPC_NODE_RECVQ_DEPTH: 2945 value = 0; /* was tipc_queue_size, now obsolete */ 2946 break; 2947 case TIPC_SOCK_RECVQ_DEPTH: 2948 value = skb_queue_len(&sk->sk_receive_queue); 2949 break; 2950 case TIPC_GROUP_JOIN: 2951 seq.type = 0; 2952 if (tsk->group) 2953 tipc_group_self(tsk->group, &seq, &scope); 2954 value = seq.type; 2955 break; 2956 default: 2957 res = -EINVAL; 2958 } 2959 2960 release_sock(sk); 2961 2962 if (res) 2963 return res; /* "get" failed */ 2964 2965 if (len < sizeof(value)) 2966 return -EINVAL; 2967 2968 if (copy_to_user(ov, &value, sizeof(value))) 2969 return -EFAULT; 2970 2971 return put_user(sizeof(value), ol); 2972 } 2973 2974 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2975 { 2976 struct sock *sk = sock->sk; 2977 struct tipc_sioc_ln_req lnr; 2978 void __user *argp = (void __user *)arg; 2979 2980 switch (cmd) { 2981 case SIOCGETLINKNAME: 2982 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2983 return -EFAULT; 2984 if (!tipc_node_get_linkname(sock_net(sk), 2985 lnr.bearer_id & 0xffff, lnr.peer, 2986 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2987 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2988 return -EFAULT; 2989 return 0; 2990 } 2991 return -EADDRNOTAVAIL; 2992 default: 2993 return -ENOIOCTLCMD; 2994 } 2995 } 2996 2997 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 2998 { 2999 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3000 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3001 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3002 3003 tsk1->peer.family = AF_TIPC; 3004 tsk1->peer.addrtype = TIPC_ADDR_ID; 3005 tsk1->peer.scope = TIPC_NODE_SCOPE; 3006 tsk1->peer.addr.id.ref = tsk2->portid; 3007 tsk1->peer.addr.id.node = onode; 3008 tsk2->peer.family = AF_TIPC; 3009 tsk2->peer.addrtype = TIPC_ADDR_ID; 3010 tsk2->peer.scope = TIPC_NODE_SCOPE; 3011 tsk2->peer.addr.id.ref = tsk1->portid; 3012 tsk2->peer.addr.id.node = onode; 3013 3014 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3015 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3016 return 0; 3017 } 3018 3019 /* Protocol switches for the various types of TIPC sockets */ 3020 3021 static const struct proto_ops msg_ops = { 3022 .owner = THIS_MODULE, 3023 .family = AF_TIPC, 3024 .release = tipc_release, 3025 .bind = tipc_bind, 3026 .connect = tipc_connect, 3027 .socketpair = tipc_socketpair, 3028 .accept = sock_no_accept, 3029 .getname = tipc_getname, 3030 .poll = tipc_poll, 3031 .ioctl = tipc_ioctl, 3032 .listen = sock_no_listen, 3033 .shutdown = tipc_shutdown, 3034 .setsockopt = tipc_setsockopt, 3035 .getsockopt = 
tipc_getsockopt, 3036 .sendmsg = tipc_sendmsg, 3037 .recvmsg = tipc_recvmsg, 3038 .mmap = sock_no_mmap, 3039 .sendpage = sock_no_sendpage 3040 }; 3041 3042 static const struct proto_ops packet_ops = { 3043 .owner = THIS_MODULE, 3044 .family = AF_TIPC, 3045 .release = tipc_release, 3046 .bind = tipc_bind, 3047 .connect = tipc_connect, 3048 .socketpair = tipc_socketpair, 3049 .accept = tipc_accept, 3050 .getname = tipc_getname, 3051 .poll = tipc_poll, 3052 .ioctl = tipc_ioctl, 3053 .listen = tipc_listen, 3054 .shutdown = tipc_shutdown, 3055 .setsockopt = tipc_setsockopt, 3056 .getsockopt = tipc_getsockopt, 3057 .sendmsg = tipc_send_packet, 3058 .recvmsg = tipc_recvmsg, 3059 .mmap = sock_no_mmap, 3060 .sendpage = sock_no_sendpage 3061 }; 3062 3063 static const struct proto_ops stream_ops = { 3064 .owner = THIS_MODULE, 3065 .family = AF_TIPC, 3066 .release = tipc_release, 3067 .bind = tipc_bind, 3068 .connect = tipc_connect, 3069 .socketpair = tipc_socketpair, 3070 .accept = tipc_accept, 3071 .getname = tipc_getname, 3072 .poll = tipc_poll, 3073 .ioctl = tipc_ioctl, 3074 .listen = tipc_listen, 3075 .shutdown = tipc_shutdown, 3076 .setsockopt = tipc_setsockopt, 3077 .getsockopt = tipc_getsockopt, 3078 .sendmsg = tipc_sendstream, 3079 .recvmsg = tipc_recvstream, 3080 .mmap = sock_no_mmap, 3081 .sendpage = sock_no_sendpage 3082 }; 3083 3084 static const struct net_proto_family tipc_family_ops = { 3085 .owner = THIS_MODULE, 3086 .family = AF_TIPC, 3087 .create = tipc_sk_create 3088 }; 3089 3090 static struct proto tipc_proto = { 3091 .name = "TIPC", 3092 .owner = THIS_MODULE, 3093 .obj_size = sizeof(struct tipc_sock), 3094 .sysctl_rmem = sysctl_tipc_rmem 3095 }; 3096 3097 /** 3098 * tipc_socket_init - initialize TIPC socket interface 3099 * 3100 * Returns 0 on success, errno otherwise 3101 */ 3102 int tipc_socket_init(void) 3103 { 3104 int res; 3105 3106 res = proto_register(&tipc_proto, 1); 3107 if (res) { 3108 pr_err("Failed to register TIPC protocol type\n"); 3109 goto out; 3110 } 3111 3112 res = sock_register(&tipc_family_ops); 3113 if (res) { 3114 pr_err("Failed to register TIPC socket type\n"); 3115 proto_unregister(&tipc_proto); 3116 goto out; 3117 } 3118 out: 3119 return res; 3120 } 3121 3122 /** 3123 * tipc_socket_stop - stop TIPC socket interface 3124 */ 3125 void tipc_socket_stop(void) 3126 { 3127 sock_unregister(tipc_family_ops.family); 3128 proto_unregister(&tipc_proto); 3129 } 3130 3131 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3132 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3133 { 3134 u32 peer_node; 3135 u32 peer_port; 3136 struct nlattr *nest; 3137 3138 peer_node = tsk_peer_node(tsk); 3139 peer_port = tsk_peer_port(tsk); 3140 3141 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3142 3143 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3144 goto msg_full; 3145 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3146 goto msg_full; 3147 3148 if (tsk->conn_type != 0) { 3149 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3150 goto msg_full; 3151 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3152 goto msg_full; 3153 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3154 goto msg_full; 3155 } 3156 nla_nest_end(skb, nest); 3157 3158 return 0; 3159 3160 msg_full: 3161 nla_nest_cancel(skb, nest); 3162 3163 return -EMSGSIZE; 3164 } 3165 3166 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3167 *tsk) 3168 { 3169 struct net *net = sock_net(skb->sk); 3170 struct sock *sk = &tsk->sk; 3171 3172 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3173 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3174 return -EMSGSIZE; 3175 3176 if (tipc_sk_connected(sk)) { 3177 if (__tipc_nl_add_sk_con(skb, tsk)) 3178 return -EMSGSIZE; 3179 } else if (!list_empty(&tsk->publications)) { 3180 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3181 return -EMSGSIZE; 3182 } 3183 return 0; 3184 } 3185 3186 /* Caller should hold socket lock for the passed tipc socket. */ 3187 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3188 struct tipc_sock *tsk) 3189 { 3190 struct nlattr *attrs; 3191 void *hdr; 3192 3193 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3194 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3195 if (!hdr) 3196 goto msg_cancel; 3197 3198 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3199 if (!attrs) 3200 goto genlmsg_cancel; 3201 3202 if (__tipc_nl_add_sk_info(skb, tsk)) 3203 goto attr_msg_cancel; 3204 3205 nla_nest_end(skb, attrs); 3206 genlmsg_end(skb, hdr); 3207 3208 return 0; 3209 3210 attr_msg_cancel: 3211 nla_nest_cancel(skb, attrs); 3212 genlmsg_cancel: 3213 genlmsg_cancel(skb, hdr); 3214 msg_cancel: 3215 return -EMSGSIZE; 3216 } 3217 3218 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3219 int (*skb_handler)(struct sk_buff *skb, 3220 struct netlink_callback *cb, 3221 struct tipc_sock *tsk)) 3222 { 3223 struct net *net = sock_net(skb->sk); 3224 struct tipc_net *tn = tipc_net(net); 3225 const struct bucket_table *tbl; 3226 u32 prev_portid = cb->args[1]; 3227 u32 tbl_id = cb->args[0]; 3228 struct rhash_head *pos; 3229 struct tipc_sock *tsk; 3230 int err; 3231 3232 rcu_read_lock(); 3233 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 3234 for (; tbl_id < tbl->size; tbl_id++) { 3235 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 3236 spin_lock_bh(&tsk->sk.sk_lock.slock); 3237 if (prev_portid && prev_portid != tsk->portid) { 3238 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3239 continue; 3240 } 3241 3242 err = skb_handler(skb, cb, tsk); 3243 if (err) { 3244 prev_portid = tsk->portid; 3245 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3246 goto out; 3247 } 3248 3249 prev_portid = 0; 3250 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3251 } 3252 } 3253 out: 3254 rcu_read_unlock(); 3255 cb->args[0] = tbl_id; 3256 cb->args[1] = prev_portid; 3257 3258 return skb->len; 3259 } 3260 EXPORT_SYMBOL(tipc_nl_sk_walk); 3261 3262 int tipc_sk_fill_sock_diag(struct sk_buff 
*skb, struct netlink_callback *cb, 3263 struct tipc_sock *tsk, u32 sk_filter_state, 3264 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3265 { 3266 struct sock *sk = &tsk->sk; 3267 struct nlattr *attrs; 3268 struct nlattr *stat; 3269 3270 /*filter response w.r.t sk_state*/ 3271 if (!(sk_filter_state & (1 << sk->sk_state))) 3272 return 0; 3273 3274 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3275 if (!attrs) 3276 goto msg_cancel; 3277 3278 if (__tipc_nl_add_sk_info(skb, tsk)) 3279 goto attr_msg_cancel; 3280 3281 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3282 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3283 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3284 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3285 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3286 sock_i_uid(sk))) || 3287 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3288 tipc_diag_gen_cookie(sk), 3289 TIPC_NLA_SOCK_PAD)) 3290 goto attr_msg_cancel; 3291 3292 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3293 if (!stat) 3294 goto attr_msg_cancel; 3295 3296 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3297 skb_queue_len(&sk->sk_receive_queue)) || 3298 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3299 skb_queue_len(&sk->sk_write_queue)) || 3300 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3301 atomic_read(&sk->sk_drops))) 3302 goto stat_msg_cancel; 3303 3304 if (tsk->cong_link_cnt && 3305 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3306 goto stat_msg_cancel; 3307 3308 if (tsk_conn_cong(tsk) && 3309 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3310 goto stat_msg_cancel; 3311 3312 nla_nest_end(skb, stat); 3313 nla_nest_end(skb, attrs); 3314 3315 return 0; 3316 3317 stat_msg_cancel: 3318 nla_nest_cancel(skb, stat); 3319 attr_msg_cancel: 3320 nla_nest_cancel(skb, attrs); 3321 msg_cancel: 3322 return -EMSGSIZE; 3323 } 3324 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3325 3326 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3327 { 3328 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3329 } 3330 3331 /* Caller should hold socket lock for the passed tipc socket. */ 3332 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3333 struct netlink_callback *cb, 3334 struct publication *publ) 3335 { 3336 void *hdr; 3337 struct nlattr *attrs; 3338 3339 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3340 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3341 if (!hdr) 3342 goto msg_cancel; 3343 3344 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3345 if (!attrs) 3346 goto genlmsg_cancel; 3347 3348 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3349 goto attr_msg_cancel; 3350 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3351 goto attr_msg_cancel; 3352 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3353 goto attr_msg_cancel; 3354 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3355 goto attr_msg_cancel; 3356 3357 nla_nest_end(skb, attrs); 3358 genlmsg_end(skb, hdr); 3359 3360 return 0; 3361 3362 attr_msg_cancel: 3363 nla_nest_cancel(skb, attrs); 3364 genlmsg_cancel: 3365 genlmsg_cancel(skb, hdr); 3366 msg_cancel: 3367 return -EMSGSIZE; 3368 } 3369 3370 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3371 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3372 struct netlink_callback *cb, 3373 struct tipc_sock *tsk, u32 *last_publ) 3374 { 3375 int err; 3376 struct publication *p; 3377 3378 if (*last_publ) { 3379 list_for_each_entry(p, &tsk->publications, binding_sock) { 3380 if (p->key == *last_publ) 3381 break; 3382 } 3383 if (p->key != *last_publ) { 3384 /* We never set seq or call nl_dump_check_consistent() 3385 * this means that setting prev_seq here will cause the 3386 * consistence check to fail in the netlink callback 3387 * handler. Resulting in the last NLMSG_DONE message 3388 * having the NLM_F_DUMP_INTR flag set. 3389 */ 3390 cb->prev_seq = 1; 3391 *last_publ = 0; 3392 return -EPIPE; 3393 } 3394 } else { 3395 p = list_first_entry(&tsk->publications, struct publication, 3396 binding_sock); 3397 } 3398 3399 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3400 err = __tipc_nl_add_sk_publ(skb, cb, p); 3401 if (err) { 3402 *last_publ = p->key; 3403 return err; 3404 } 3405 } 3406 *last_publ = 0; 3407 3408 return 0; 3409 } 3410 3411 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3412 { 3413 int err; 3414 u32 tsk_portid = cb->args[0]; 3415 u32 last_publ = cb->args[1]; 3416 u32 done = cb->args[2]; 3417 struct net *net = sock_net(skb->sk); 3418 struct tipc_sock *tsk; 3419 3420 if (!tsk_portid) { 3421 struct nlattr **attrs; 3422 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3423 3424 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3425 if (err) 3426 return err; 3427 3428 if (!attrs[TIPC_NLA_SOCK]) 3429 return -EINVAL; 3430 3431 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3432 attrs[TIPC_NLA_SOCK], 3433 tipc_nl_sock_policy, NULL); 3434 if (err) 3435 return err; 3436 3437 if (!sock[TIPC_NLA_SOCK_REF]) 3438 return -EINVAL; 3439 3440 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3441 } 3442 3443 if (done) 3444 return 0; 3445 3446 tsk = tipc_sk_lookup(net, tsk_portid); 3447 if (!tsk) 3448 return -EINVAL; 3449 3450 lock_sock(&tsk->sk); 3451 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3452 if (!err) 3453 done = 1; 3454 release_sock(&tsk->sk); 3455 sock_put(&tsk->sk); 3456 3457 cb->args[0] = tsk_portid; 3458 cb->args[1] = last_publ; 3459 cb->args[2] = done; 3460 3461 return skb->len; 3462 } 3463
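/*
 * Illustrative userspace sketch of the socket API implemented in this file
 * (not part of the kernel sources; the service type 18888, instance 17 and
 * all variable names are arbitrary examples):
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *		.addr.name.domain = 0,
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	sendto(sd, "hello", 5, 0, (struct sockaddr *)&srv, sizeof(srv));
 *
 * The sendto() reaches tipc_sendmsg() through msg_ops; name resolution,
 * flow control and reception then follow the paths implemented above.
 */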