/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: a connection probe has been sent and not yet acknowledged
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size advertised by peer
 * @peer_caps: capabilities of the connected peer node
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size advertised to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: communication group the socket is a member of, if any
 * @group_is_open: group, if any, is currently open for sending
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, counting one unit
 *   per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
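/* Worked example (illustrative only, assuming FLOWCTL_BLK_SZ is 1024 as
 * defined in socket.h): sending a 1500 byte message to a peer supporting
 * TIPC_BLOCK_FLOWCTL consumes tsk_inc(tsk, 1500) = 1500 / 1024 + 1 = 2
 * advertised blocks, while a legacy peer always consumes exactly one unit
 * per message. On the advertising side, a 64 KB receive buffer yields
 * tsk_adv_blocks(65536) = 65536 / 1024 / 4 = 16 blocks, the divide by 4
 * absorbing the worst-case truesize(len)/len overhead noted above.
 */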
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && (peer_node == tn->own_addr))
		return true;

	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
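/* Usage sketch (illustrative only): callers evaluate the condition with the
 * socket lock held; the macro drops the lock while sleeping and re-takes it
 * before re-testing, so the condition may legally reference socket state:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 *
 * A zero remaining timeout surfaces as -EAGAIN and a pending signal via
 * sock_intr_errno(), both through tipc_sk_sock_err(), so the loop can
 * never degenerate into an unbounded sleep.
 */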
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
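/* Illustrative user-space view (not part of this file): the switch on
 * sock->type above means that
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 * binds the descriptor to msg_ops, while SOCK_STREAM selects stream_ops
 * and SOCK_SEQPACKET selects packet_ops; any non-zero protocol argument
 * is rejected with EPROTONOSUPPORT.
 */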
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope > 0) ?
	      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
	      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns address length on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tn->own_addr;
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
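/* Illustrative user-space sketch (not part of this file): publishing the
 * name sequence {1000, 1..99} with cluster scope, then reading back the
 * socket's own port ID, could look like:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 99 },
 *	};
 *	socklen_t alen = sizeof(a);
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *	getsockname(sd, (struct sockaddr *)&a, &alen);
 *
 * A negative .scope value withdraws the same name sequence again.
 */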
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: ???
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
	case TIPC_CONNECTING:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
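/* Illustrative user-space sketch (not part of this file): a datagram sent
 * to every socket bound anywhere in instance range 1..99 of name type 1000:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * __tipc_sendmsg() routes such a destination into tipc_sendmcast() above.
 */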
rc : dlen; 814 } 815 816 /** 817 * tipc_send_group_msg - send a message to a member in the group 818 * @net: network namespace 819 * @m: message to send 820 * @mb: group member 821 * @dnode: destination node 822 * @dport: destination port 823 * @dlen: total length of message data 824 */ 825 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 826 struct msghdr *m, struct tipc_member *mb, 827 u32 dnode, u32 dport, int dlen) 828 { 829 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 830 struct tipc_mc_method *method = &tsk->mc_method; 831 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 832 struct tipc_msg *hdr = &tsk->phdr; 833 struct sk_buff_head pkts; 834 int mtu, rc; 835 836 /* Complete message header */ 837 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 838 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 839 msg_set_destport(hdr, dport); 840 msg_set_destnode(hdr, dnode); 841 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 842 843 /* Build message as chain of buffers */ 844 skb_queue_head_init(&pkts); 845 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 846 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 847 if (unlikely(rc != dlen)) 848 return rc; 849 850 /* Send message */ 851 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 852 if (unlikely(rc == -ELINKCONG)) { 853 tipc_dest_push(&tsk->cong_links, dnode, 0); 854 tsk->cong_link_cnt++; 855 } 856 857 /* Update send window */ 858 tipc_group_update_member(mb, blks); 859 860 /* A broadcast sent within next EXPIRE period must follow same path */ 861 method->rcast = true; 862 method->mandatory = true; 863 return dlen; 864 } 865 866 /** 867 * tipc_send_group_unicast - send message to a member in the group 868 * @sock: socket structure 869 * @m: message to send 870 * @dlen: total length of message data 871 * @timeout: timeout to wait for wakeup 872 * 873 * Called from function tipc_sendmsg(), which has done all sanity checks 874 * Returns the number of bytes sent on success, or errno 875 */ 876 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 877 int dlen, long timeout) 878 { 879 struct sock *sk = sock->sk; 880 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 881 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 882 struct tipc_sock *tsk = tipc_sk(sk); 883 struct tipc_group *grp = tsk->group; 884 struct net *net = sock_net(sk); 885 struct tipc_member *mb = NULL; 886 u32 node, port; 887 int rc; 888 889 node = dest->addr.id.node; 890 port = dest->addr.id.ref; 891 if (!port && !node) 892 return -EHOSTUNREACH; 893 894 /* Block or return if destination link or member is congested */ 895 rc = tipc_wait_for_cond(sock, &timeout, 896 !tipc_dest_find(&tsk->cong_links, node, 0) && 897 !tipc_group_cong(grp, node, port, blks, &mb)); 898 if (unlikely(rc)) 899 return rc; 900 901 if (unlikely(!mb)) 902 return -EHOSTUNREACH; 903 904 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 905 906 return rc ? 
rc : dlen; 907 } 908 909 /** 910 * tipc_send_group_anycast - send message to any member with given identity 911 * @sock: socket structure 912 * @m: message to send 913 * @dlen: total length of message data 914 * @timeout: timeout to wait for wakeup 915 * 916 * Called from function tipc_sendmsg(), which has done all sanity checks 917 * Returns the number of bytes sent on success, or errno 918 */ 919 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 920 int dlen, long timeout) 921 { 922 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 923 struct sock *sk = sock->sk; 924 struct tipc_sock *tsk = tipc_sk(sk); 925 struct list_head *cong_links = &tsk->cong_links; 926 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 927 struct tipc_group *grp = tsk->group; 928 struct tipc_msg *hdr = &tsk->phdr; 929 struct tipc_member *first = NULL; 930 struct tipc_member *mbr = NULL; 931 struct net *net = sock_net(sk); 932 u32 node, port, exclude; 933 struct list_head dsts; 934 u32 type, inst, scope; 935 int lookups = 0; 936 int dstcnt, rc; 937 bool cong; 938 939 INIT_LIST_HEAD(&dsts); 940 941 type = msg_nametype(hdr); 942 inst = dest->addr.name.name.instance; 943 scope = msg_lookup_scope(hdr); 944 exclude = tipc_group_exclude(grp); 945 946 while (++lookups < 4) { 947 first = NULL; 948 949 /* Look for a non-congested destination member, if any */ 950 while (1) { 951 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 952 &dstcnt, exclude, false)) 953 return -EHOSTUNREACH; 954 tipc_dest_pop(&dsts, &node, &port); 955 cong = tipc_group_cong(grp, node, port, blks, &mbr); 956 if (!cong) 957 break; 958 if (mbr == first) 959 break; 960 if (!first) 961 first = mbr; 962 } 963 964 /* Start over if destination was not in member list */ 965 if (unlikely(!mbr)) 966 continue; 967 968 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 969 break; 970 971 /* Block or return if destination link or member is congested */ 972 rc = tipc_wait_for_cond(sock, &timeout, 973 !tipc_dest_find(cong_links, node, 0) && 974 !tipc_group_cong(grp, node, port, 975 blks, &mbr)); 976 if (unlikely(rc)) 977 return rc; 978 979 /* Send, unless destination disappeared while waiting */ 980 if (likely(mbr)) 981 break; 982 } 983 984 if (unlikely(lookups >= 4)) 985 return -EHOSTUNREACH; 986 987 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 988 989 return rc ? 
rc : dlen; 990 } 991 992 /** 993 * tipc_send_group_bcast - send message to all members in communication group 994 * @sk: socket structure 995 * @m: message to send 996 * @dlen: total length of message data 997 * @timeout: timeout to wait for wakeup 998 * 999 * Called from function tipc_sendmsg(), which has done all sanity checks 1000 * Returns the number of bytes sent on success, or errno 1001 */ 1002 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 1003 int dlen, long timeout) 1004 { 1005 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1006 struct sock *sk = sock->sk; 1007 struct net *net = sock_net(sk); 1008 struct tipc_sock *tsk = tipc_sk(sk); 1009 struct tipc_group *grp = tsk->group; 1010 struct tipc_nlist *dsts = tipc_group_dests(grp); 1011 struct tipc_mc_method *method = &tsk->mc_method; 1012 bool ack = method->mandatory && method->rcast; 1013 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1014 struct tipc_msg *hdr = &tsk->phdr; 1015 int mtu = tipc_bcast_get_mtu(net); 1016 struct sk_buff_head pkts; 1017 int rc = -EHOSTUNREACH; 1018 1019 if (!dsts->local && !dsts->remote) 1020 return -EHOSTUNREACH; 1021 1022 /* Block or return if any destination link or member is congested */ 1023 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1024 !tipc_group_bc_cong(grp, blks)); 1025 if (unlikely(rc)) 1026 return rc; 1027 1028 /* Complete message header */ 1029 if (dest) { 1030 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1031 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1032 } else { 1033 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1034 msg_set_nameinst(hdr, 0); 1035 } 1036 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1037 msg_set_destport(hdr, 0); 1038 msg_set_destnode(hdr, 0); 1039 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1040 1041 /* Avoid getting stuck with repeated forced replicasts */ 1042 msg_set_grp_bc_ack_req(hdr, ack); 1043 1044 /* Build message as chain of buffers */ 1045 skb_queue_head_init(&pkts); 1046 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1047 if (unlikely(rc != dlen)) 1048 return rc; 1049 1050 /* Send message */ 1051 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1052 if (unlikely(rc)) 1053 return rc; 1054 1055 /* Update broadcast sequence number and send windows */ 1056 tipc_group_update_bc_members(tsk->group, blks, ack); 1057 1058 /* Broadcast link is now free to choose method for next broadcast */ 1059 method->mandatory = false; 1060 method->expires = jiffies; 1061 1062 return dlen; 1063 } 1064 1065 /** 1066 * tipc_send_group_mcast - send message to all members with given identity 1067 * @sock: socket structure 1068 * @m: message to send 1069 * @dlen: total length of message data 1070 * @timeout: timeout to wait for wakeup 1071 * 1072 * Called from function tipc_sendmsg(), which has done all sanity checks 1073 * Returns the number of bytes sent on success, or errno 1074 */ 1075 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1076 int dlen, long timeout) 1077 { 1078 struct sock *sk = sock->sk; 1079 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1080 struct tipc_sock *tsk = tipc_sk(sk); 1081 struct tipc_group *grp = tsk->group; 1082 struct tipc_msg *hdr = &tsk->phdr; 1083 struct net *net = sock_net(sk); 1084 u32 type, inst, scope, exclude; 1085 struct list_head dsts; 1086 u32 dstcnt; 1087 1088 INIT_LIST_HEAD(&dsts); 1089 1090 type = msg_nametype(hdr); 1091 inst = dest->addr.name.name.instance; 1092 scope = msg_lookup_scope(hdr); 1093 exclude = 
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, oport, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		oport = msg_origport(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @xmitq: queue for messages to be sent out, if any
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
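/* Illustrative user-space sketch (not part of this file): a connectionless
 * datagram addressed by name; the name-to-port translation happens in
 * __tipc_sendmsg() below via tipc_nametbl_translate():
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */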
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 type, inst, domain;
	u32 dnode, dport;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		domain = dest->addr.name.domain;
		dnode = domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	srcaddr->sock.scope = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}
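/* Illustrative user-space sketch (not part of this file): because group
 * messages carry both the sending socket's ID and the sending member's
 * name, a receiver can size msg_name for the sockaddr_pair laid down
 * above and obtain both addresses at once:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct sockaddr_tipc addrs[2];
 *	struct msghdr m = {
 *		.msg_name = addrs, .msg_namelen = sizeof(addrs),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	recvmsg(sd, &m, 0);
 *
 * On return m.msg_namelen tells whether one address (socket ID only) or
 * two (socket ID plus member name) were filled in.
 */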
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
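/* Illustrative user-space sketch (not part of this file): reading the
 * TIPC_DESTNAME ancillary object laid down above (three u32 words: type,
 * lower, upper of the destination name):
 *
 *	char cbuf[CMSG_SPACE(12)];
 *	struct msghdr m = { .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(sd, &m, 0);
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC &&
 *		    c->cmsg_type == TIPC_DESTNAME) {
 *			__u32 *name = (__u32 *)CMSG_DATA(c);
 *			// name[0]=type, name[1]=lower, name[2]=upper
 *		}
 *
 * TIPC_ERRINFO (error code plus returned-data length) and TIPC_RETDATA
 * (the rejected payload itself) are retrieved the same way.
 */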
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
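/* Illustrative user-space sketch (not part of this file): SOCK_STREAM
 * reception with a low-water mark; tipc_recvstream() keeps absorbing
 * message chunks until 'required' bytes (here 256) or an error arrives:
 *
 *	int lowat = 256;
 *	setsockopt(sd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	ssize_t n = recv(sd, buf, sizeof(buf), 0);
 *
 * With MSG_PEEK the loop above stops after one message and leaves
 * bytes_read untouched, so the same data is seen again on the next call.
 */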
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

/**
 * tipc_sk_filter_connect - handle incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer
 *
 * Returns true if the message is acceptable, false if it is to be rejected
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	u32 pport = msg_origport(hdr);
	u32 pnode = msg_orignode(hdr);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr))) {
			if (pport != tsk_peer_port(tsk) ||
			    pnode != tsk_peer_node(tsk))
				return false;

			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			sk->sk_state_change(sk);
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		sk->sk_data_ready(sk);

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}

/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection-oriented messages, irrespective of importance,
 * the default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return sk->sk_rcvbuf;

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}

/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message
 * @xmitq: output queue for protocol and rejected messages
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int limit, err = TIPC_OK;

	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
			err = TIPC_ERR_OVERLOAD;

		if (unlikely(err)) {
			tipc_skb_reject(net, err, skb, xmitq);
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk);
	}
}

/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);
	return 0;
}

/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: output queue for rejected messages
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}

/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace
 * @inputq: buffer list containing the buffers
 *
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}
		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (tsk->group) {
		res = -EINVAL;
		goto exit;
	}

	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	} else if (dst->family != AF_TIPC) {
		res = -EINVAL;
	}
	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
		res = -EINVAL;
	if (res)
		goto exit;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect() is non-blocking, set MSG_DONTWAIT so that
		 * __tipc_sendmsg() never blocks.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* We have just entered TIPC_CONNECTING state; the only
		 * difference is that the return value in the non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
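	 *
	 * Seen from user space (illustrative): several processes may block
	 * in accept() on the same listening socket; each incoming SYN wakes
	 * up exactly one of them.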
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 * @kern: caused by kernel or by userspace?
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
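 *
 * Illustrative user-space sketch (hypothetical descriptor 'sd'); note
 * that TIPC only accepts SHUT_RDWR:
 *
 *	if (shutdown(sd, SHUT_RDWR) < 0)
 *		perror("shutdown");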
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

static void tipc_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 peer_port = tsk_peer_port(tsk);
	u32 peer_node = tsk_peer_node(tsk);
	u32 own_node = tsk_own_node(tsk);
	u32 own_port = tsk->portid;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk))
		goto exit;

	/* Try again later if socket is busy */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		goto exit;
	}

	if (tsk->probe_unacked) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, peer_node, peer_port);
		sk->sk_state_change(sk);
		goto exit;
	}
	/* Send new probe */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      peer_node, own_node, peer_port, own_port,
			      TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
exit:
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(net, skb, peer_node, own_port);
	sock_put(sk);
}

static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
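 * Each socket's preformatted header is updated under the socket spinlock
 * while walking the rhashtable of sockets; the walk is restarted if it
 * runs concurrently with a table resize (-EAGAIN).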
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq seq;
	int rc;

	if (mreq->type < TIPC_RESERVED_TYPES)
		return -EACCES;
	if (mreq->scope > TIPC_NODE_SCOPE)
		return -EINVAL;
	if (grp)
		return -EACCES;
	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
	if (!grp)
		return -ENOMEM;
	tsk->group = grp;
	msg_set_lookup_scope(hdr, mreq->scope);
	msg_set_nametype(hdr, mreq->type);
	msg_set_dest_droppable(hdr, true);
	seq.type = mreq->type;
	seq.lower = mreq->instance;
	seq.upper = seq.lower;
	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
	if (rc) {
		tipc_group_delete(net, grp);
		tsk->group = NULL;
		return rc;
	}
	/* Eliminate any risk that a broadcast overtakes sent JOINs */
	tsk->mc_method.rcast = true;
	tsk->mc_method.mandatory = true;
	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
	return rc;
}

static int tipc_sk_leave(struct tipc_sock *tsk)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_name_seq seq;
	int scope;

	if (!grp)
		return -EINVAL;
	tipc_group_self(grp, &seq, &scope);
	tipc_group_delete(net, grp);
	tsk->group = NULL;
	tipc_sk_withdraw(tsk, scope, &seq);
	return 0;
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group_req mreq;
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		if (get_user(value, (u32 __user *)ov))
			return -EFAULT;
		break;
	case TIPC_GROUP_JOIN:
		if (ol < sizeof(mreq))
			return -EINVAL;
		if (copy_from_user(&mreq, ov, sizeof(mreq)))
			return -EFAULT;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_GROUP_JOIN:
		res = tipc_sk_join(tsk, &mreq);
		break;
	case TIPC_GROUP_LEAVE:
		res = tipc_sk_leave(tsk);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
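 *
 * Illustrative user-space sketch (hypothetical descriptor 'sd'); reads
 * the connect timeout, which is expressed in milliseconds:
 *
 *	__u32 tmo;
 *	socklen_t olen = sizeof(tmo);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, &olen))
 *		printf("connect timeout: %u ms\n", tmo);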
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_name_seq seq;
	int len, scope;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	case TIPC_GROUP_JOIN:
		seq.type = 0;
		if (tsk->group)
			tipc_group_self(tsk->group, &seq, &scope);
		value = seq.type;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}

/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket.
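 * The lock is normally already held here via the spin_lock_bh() taken
 * in tipc_nl_sk_dump() below.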
 */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket.
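 * The lock is normally already held here via the lock_sock() taken in
 * tipc_nl_publ_dump() below.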
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}
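/*
 * Illustrative user-space usage of the socket API implemented above.
 * A minimal sketch, not part of the kernel build; the service type and
 * instance values and the descriptor name 'sd' are hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name.type = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *
 *	sendto(sd, "hello", 5, 0, (struct sockaddr *)&srv, sizeof(srv));
 *
 * Joining a communication group (see tipc_sk_join() above) is driven the
 * same way, through setsockopt():
 *
 *	struct tipc_group_req mreq = {
 *		.type = 18888,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 */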