/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: last connection probe has not yet been acknowledged by peer
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window advertised by peer
 * @peer_caps: capabilities of the connected peer node
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window advertised to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: communication group this socket is a member of, if any
 * @group_is_open: socket is currently open for group traffic
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

/* tsk_blocks(): translate a buffer size in bytes to the number of
 * flow control blocks it occupies, rounded up
 */
static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
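/* Worked example (illustrative only, assuming FLOWCTL_BLK_SZ is 1024 as
 * defined in socket.h): a peer supporting TIPC_BLOCK_FLOWCTL that sends a
 * 3000 byte message consumes tsk_inc(tsk, 3000) == 3000/1024 + 1 == 3
 * blocks of the send window, while a legacy peer always consumes one unit
 * per message. A 64 KB receive buffer advertises tsk_adv_blocks(65536) ==
 * 65536/1024/4 == 16 blocks, the divide-by-4 covering the worst-case
 * truesize(len)/len overhead noted above.
 */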
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 * @state: new socket state
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
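/* Usage sketch (illustrative only): callers hold the socket lock and pass a
 * pointer to their remaining timeout; the macro drops the lock while
 * sleeping and reacquires it before re-testing the condition, e.g.:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 *
 * On wakeup the condition is evaluated again, so it must be safe to re-test
 * under the reacquired lock.
 */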
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
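/* Example (user space, illustrative only): each supported socket type
 * selects one of the proto_ops tables chosen in tipc_sk_create() above:
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);        // msg_ops
 *	int dgram = socket(AF_TIPC, SOCK_DGRAM, 0);    // msg_ops, unreliable
 *	int pkt = socket(AF_TIPC, SOCK_SEQPACKET, 0);  // packet_ops
 *	int strm = socket(AF_TIPC, SOCK_STREAM, 0);    // stream_ops
 *
 * Any non-zero protocol argument is rejected with EPROTONOSUPPORT.
 */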
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
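/* Example (user space, illustrative only): publishing name sequence
 * {1000, 0..99} cluster-wide, then withdrawing it again by flipping the
 * sign of the scope, per the convention described above:
 *
 *	struct sockaddr_tipc name = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));    // publish
 *	name.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));    // withdraw
 */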
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns the size of the returned socket address on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll_mask - read pollmask
 * @sock: socket for which to calculate the poll bits
 * @events: mask of events the caller is interested in
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
	case TIPC_CONNECTING:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
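/* Example (user space, illustrative only): sending a datagram to every
 * socket bound anywhere in instance range {1000, 0..99}, which ends up in
 * tipc_sendmcast() above:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *	sendto(rdm_sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */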
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				!tipc_group_cong(grp, node, port, blks, &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	while (++lookups < 4) {
		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(grp, node, port, blks, &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					!tipc_group_cong(grp, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_nlist *dsts = tipc_group_dests(grp);
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
				!tipc_group_bc_cong(grp, blks));
	if (unlikely(rc))
		return rc;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
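/* Example (user space, illustrative only, assuming the standard
 * linux/tipc.h group ABI): joining group 1000 as member instance 1 via
 * setsockopt(TIPC_GROUP_JOIN), then broadcasting to all members (no
 * destination address, tipc_send_group_bcast() above) and anycasting to
 * one member holding instance 5 (TIPC_ADDR_NAME destination,
 * tipc_send_group_anycast()):
 *
 *	struct tipc_group_req req = {
 *		.type = 1000, .instance = 1,
 *		.scope = TIPC_CLUSTER_SCOPE, .flags = 0,
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *	send(sd, buf, len, 0);
 *	struct sockaddr_tipc one = {
 *		.family = AF_TIPC, .addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 5 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&one, sizeof(one));
 */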
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, oport, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		oport = msg_origport(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @xmitq: queue for messages to be transmitted in response
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
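/* Example (user space, illustrative only): sending a datagram to service
 * name {1000, 5} with cluster-wide lookup (domain 0), which takes the
 * TIPC_ADDR_NAME branch of __tipc_sendmsg() above:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 5 },
 *		.addr.name.domain = 0,
 *	};
 *	sendto(rdm_sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */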
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
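/* Example (user space, illustrative only): a SOCK_STREAM socket may set up
 * its connection implicitly by passing a destination name on the first
 * send; __tipc_sendstream() above forwards that first message to
 * __tipc_sendmsg() as a SYN, and later sends block until the peer's ACK
 * has moved the socket to TIPC_ESTABLISHED:
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 5 },
 *	};
 *	sendto(strm_sd, buf, len, 0, (struct sockaddr *)&srv, sizeof(srv));
 *	send(strm_sd, buf2, len2, 0);	// plain send once connected
 */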
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}
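/* Example (user space, illustrative only): reading the ancillary data that
 * tipc_sk_anc_data_recv() above attaches; TIPC_DESTNAME carries three u32
 * values {type, lower, upper}:
 *
 *	__u32 dname[3];
 *	char cbuf[CMSG_SPACE(sizeof(dname))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(sd, &m, 0);
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC && c->cmsg_type == TIPC_DESTNAME)
 *			memcpy(dname, CMSG_DATA(c), sizeof(dname));
 */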
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
copied : rc; 1883 } 1884 1885 /** 1886 * tipc_write_space - wake up thread if port congestion is released 1887 * @sk: socket 1888 */ 1889 static void tipc_write_space(struct sock *sk) 1890 { 1891 struct socket_wq *wq; 1892 1893 rcu_read_lock(); 1894 wq = rcu_dereference(sk->sk_wq); 1895 if (skwq_has_sleeper(wq)) 1896 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1897 EPOLLWRNORM | EPOLLWRBAND); 1898 rcu_read_unlock(); 1899 } 1900 1901 /** 1902 * tipc_data_ready - wake up threads to indicate messages have been received 1903 * @sk: socket 1905 */ 1906 static void tipc_data_ready(struct sock *sk) 1907 { 1908 struct socket_wq *wq; 1909 1910 rcu_read_lock(); 1911 wq = rcu_dereference(sk->sk_wq); 1912 if (skwq_has_sleeper(wq)) 1913 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1914 EPOLLRDNORM | EPOLLRDBAND); 1915 rcu_read_unlock(); 1916 } 1917 1918 static void tipc_sock_destruct(struct sock *sk) 1919 { 1920 __skb_queue_purge(&sk->sk_receive_queue); 1921 } 1922 1923 static void tipc_sk_proto_rcv(struct sock *sk, 1924 struct sk_buff_head *inputq, 1925 struct sk_buff_head *xmitq) 1926 { 1927 struct sk_buff *skb = __skb_dequeue(inputq); 1928 struct tipc_sock *tsk = tipc_sk(sk); 1929 struct tipc_msg *hdr = buf_msg(skb); 1930 struct tipc_group *grp = tsk->group; 1931 bool wakeup = false; 1932 1933 switch (msg_user(hdr)) { 1934 case CONN_MANAGER: 1935 tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1936 return; 1937 case SOCK_WAKEUP: 1938 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1939 tsk->cong_link_cnt--; 1940 wakeup = true; 1941 break; 1942 case GROUP_PROTOCOL: 1943 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1944 break; 1945 case TOP_SRV: 1946 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1947 hdr, inputq, xmitq); 1948 break; 1949 default: 1950 break; 1951 } 1952 1953 if (wakeup) 1954 sk->sk_write_space(sk); 1955 1956 kfree_skb(skb); 1957 } 1958 1959 /** 1960 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket 1961 * @tsk: TIPC socket 1962 * @skb: pointer to message buffer.
Set to NULL if buffer is consumed 1963 * 1964 * Returns true if everything ok, false otherwise 1965 */ 1966 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1967 { 1968 struct sock *sk = &tsk->sk; 1969 struct net *net = sock_net(sk); 1970 struct tipc_msg *hdr = buf_msg(skb); 1971 u32 pport = msg_origport(hdr); 1972 u32 pnode = msg_orignode(hdr); 1973 1974 if (unlikely(msg_mcast(hdr))) 1975 return false; 1976 1977 switch (sk->sk_state) { 1978 case TIPC_CONNECTING: 1979 /* Accept only ACK or NACK message */ 1980 if (unlikely(!msg_connected(hdr))) { 1981 if (pport != tsk_peer_port(tsk) || 1982 pnode != tsk_peer_node(tsk)) 1983 return false; 1984 1985 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1986 sk->sk_err = ECONNREFUSED; 1987 sk->sk_state_change(sk); 1988 return true; 1989 } 1990 1991 if (unlikely(msg_errcode(hdr))) { 1992 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1993 sk->sk_err = ECONNREFUSED; 1994 sk->sk_state_change(sk); 1995 return true; 1996 } 1997 1998 if (unlikely(!msg_isdata(hdr))) { 1999 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2000 sk->sk_err = EINVAL; 2001 sk->sk_state_change(sk); 2002 return true; 2003 } 2004 2005 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2006 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2007 2008 /* If 'ACK+' message, add to socket receive queue */ 2009 if (msg_data_sz(hdr)) 2010 return true; 2011 2012 /* If empty 'ACK-' message, wake up sleeping connect() */ 2013 sk->sk_data_ready(sk); 2014 2015 /* 'ACK-' message is neither accepted nor rejected: */ 2016 msg_set_dest_droppable(hdr, 1); 2017 return false; 2018 2019 case TIPC_OPEN: 2020 case TIPC_DISCONNECTING: 2021 break; 2022 case TIPC_LISTEN: 2023 /* Accept only SYN message */ 2024 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2025 return true; 2026 break; 2027 case TIPC_ESTABLISHED: 2028 /* Accept only connection-based messages sent by peer */ 2029 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2030 return false; 2031 2032 if (unlikely(msg_errcode(hdr))) { 2033 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2034 /* Let timer expire on its own */ 2035 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2036 tsk->portid); 2037 sk->sk_state_change(sk); 2038 } 2039 return true; 2040 default: 2041 pr_err("Unknown sk_state %u\n", sk->sk_state); 2042 } 2043 2044 return false; 2045 } 2046 2047 /** 2048 * rcvbuf_limit - get proper overload limit of socket receive queue 2049 * @sk: socket 2050 * @skb: message 2051 * 2052 * For connection oriented messages, irrespective of importance, 2053 * default queue limit is 2 MB. 2054 * 2055 * For connectionless messages, queue limits are based on message 2056 * importance as follows: 2057 * 2058 * TIPC_LOW_IMPORTANCE (2 MB) 2059 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2060 * TIPC_HIGH_IMPORTANCE (8 MB) 2061 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2062 * 2063 * Returns overload limit according to corresponding message importance 2064 */ 2065 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2066 { 2067 struct tipc_sock *tsk = tipc_sk(sk); 2068 struct tipc_msg *hdr = buf_msg(skb); 2069 2070 if (unlikely(msg_in_group(hdr))) 2071 return sk->sk_rcvbuf; 2072 2073 if (unlikely(!msg_connected(hdr))) 2074 return sk->sk_rcvbuf << msg_importance(hdr); 2075 2076 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2077 return sk->sk_rcvbuf; 2078 2079 return FLOWCTL_MSG_LIM; 2080 } 2081 2082 /** 2083 * tipc_sk_filter_rcv - validate incoming message 2084 * @sk: socket 2085 * @skb: pointer to message.
2086 * 2087 * Enqueues message on receive queue if acceptable; optionally handles 2088 * disconnect indication for a connected socket. 2089 * 2090 * Called with socket lock already taken 2091 * 2092 */ 2093 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2094 struct sk_buff_head *xmitq) 2095 { 2096 bool sk_conn = !tipc_sk_type_connectionless(sk); 2097 struct tipc_sock *tsk = tipc_sk(sk); 2098 struct tipc_group *grp = tsk->group; 2099 struct tipc_msg *hdr = buf_msg(skb); 2100 struct net *net = sock_net(sk); 2101 struct sk_buff_head inputq; 2102 int limit, err = TIPC_OK; 2103 2104 TIPC_SKB_CB(skb)->bytes_read = 0; 2105 __skb_queue_head_init(&inputq); 2106 __skb_queue_tail(&inputq, skb); 2107 2108 if (unlikely(!msg_isdata(hdr))) 2109 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2110 2111 if (unlikely(grp)) 2112 tipc_group_filter_msg(grp, &inputq, xmitq); 2113 2114 /* Validate and add to receive buffer if there is space */ 2115 while ((skb = __skb_dequeue(&inputq))) { 2116 hdr = buf_msg(skb); 2117 limit = rcvbuf_limit(sk, skb); 2118 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2119 (!sk_conn && msg_connected(hdr)) || 2120 (!grp && msg_in_group(hdr))) 2121 err = TIPC_ERR_NO_PORT; 2122 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2123 atomic_inc(&sk->sk_drops); 2124 err = TIPC_ERR_OVERLOAD; 2125 } 2126 2127 if (unlikely(err)) { 2128 tipc_skb_reject(net, err, skb, xmitq); 2129 err = TIPC_OK; 2130 continue; 2131 } 2132 __skb_queue_tail(&sk->sk_receive_queue, skb); 2133 skb_set_owner_r(skb, sk); 2134 sk->sk_data_ready(sk); 2135 } 2136 } 2137 2138 /** 2139 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2140 * @sk: socket 2141 * @skb: message 2142 * 2143 * Caller must hold socket lock 2144 */ 2145 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2146 { 2147 unsigned int before = sk_rmem_alloc_get(sk); 2148 struct sk_buff_head xmitq; 2149 unsigned int added; 2150 2151 __skb_queue_head_init(&xmitq); 2152 2153 tipc_sk_filter_rcv(sk, skb, &xmitq); 2154 added = sk_rmem_alloc_get(sk) - before; 2155 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2156 2157 /* Send pending response/rejected messages, if any */ 2158 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2159 return 0; 2160 } 2161 2162 /** 2163 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2164 * inputq and try adding them to socket or backlog queue 2165 * @inputq: list of incoming buffers with potentially different destinations 2166 * @sk: socket where the buffers should be enqueued 2167 * @dport: port number for the socket 2168 * 2169 * Caller must hold socket lock 2170 */ 2171 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2172 u32 dport, struct sk_buff_head *xmitq) 2173 { 2174 unsigned long time_limit = jiffies + 2; 2175 struct sk_buff *skb; 2176 unsigned int lim; 2177 atomic_t *dcnt; 2178 u32 onode; 2179 2180 while (skb_queue_len(inputq)) { 2181 if (unlikely(time_after_eq(jiffies, time_limit))) 2182 return; 2183 2184 skb = tipc_skb_dequeue(inputq, dport); 2185 if (unlikely(!skb)) 2186 return; 2187 2188 /* Add message directly to receive queue if possible */ 2189 if (!sock_owned_by_user(sk)) { 2190 tipc_sk_filter_rcv(sk, skb, xmitq); 2191 continue; 2192 } 2193 2194 /* Try backlog, compensating for double-counted bytes */ 2195 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2196 if (!sk->sk_backlog.len) 2197 atomic_set(dcnt, 0); 2198 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2199 if (likely(!sk_add_backlog(sk, skb, lim))) 2200 
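			/* Accepted into the backlog within 'lim'; the lock
			 * owner will process it via tipc_sk_backlog_rcv()
			 * when calling release_sock()
			 */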
continue; 2201 2202 /* Overload => reject message back to sender */ 2203 onode = tipc_own_addr(sock_net(sk)); 2204 atomic_inc(&sk->sk_drops); 2205 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2206 __skb_queue_tail(xmitq, skb); 2207 break; 2208 } 2209 } 2210 2211 /** 2212 * tipc_sk_rcv - handle a chain of incoming buffers 2213 * @inputq: buffer list containing the buffers 2214 * Consumes all buffers in list until inputq is empty 2215 * Note: may be called in multiple threads referring to the same queue 2216 */ 2217 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2218 { 2219 struct sk_buff_head xmitq; 2220 u32 dnode, dport = 0; 2221 int err; 2222 struct tipc_sock *tsk; 2223 struct sock *sk; 2224 struct sk_buff *skb; 2225 2226 __skb_queue_head_init(&xmitq); 2227 while (skb_queue_len(inputq)) { 2228 dport = tipc_skb_peek_port(inputq, dport); 2229 tsk = tipc_sk_lookup(net, dport); 2230 2231 if (likely(tsk)) { 2232 sk = &tsk->sk; 2233 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2234 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2235 spin_unlock_bh(&sk->sk_lock.slock); 2236 } 2237 /* Send pending response/rejected messages, if any */ 2238 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2239 sock_put(sk); 2240 continue; 2241 } 2242 /* No destination socket => dequeue skb if still there */ 2243 skb = tipc_skb_dequeue(inputq, dport); 2244 if (!skb) 2245 return; 2246 2247 /* Try secondary lookup if unresolved named message */ 2248 err = TIPC_ERR_NO_PORT; 2249 if (tipc_msg_lookup_dest(net, skb, &err)) 2250 goto xmit; 2251 2252 /* Prepare for message rejection */ 2253 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2254 continue; 2255 xmit: 2256 dnode = msg_destnode(buf_msg(skb)); 2257 tipc_node_xmit_skb(net, skb, dnode, dport); 2258 } 2259 } 2260 2261 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2262 { 2263 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2264 struct sock *sk = sock->sk; 2265 int done; 2266 2267 do { 2268 int err = sock_error(sk); 2269 if (err) 2270 return err; 2271 if (!*timeo_p) 2272 return -ETIMEDOUT; 2273 if (signal_pending(current)) 2274 return sock_intr_errno(*timeo_p); 2275 2276 add_wait_queue(sk_sleep(sk), &wait); 2277 done = sk_wait_event(sk, timeo_p, 2278 sk->sk_state != TIPC_CONNECTING, &wait); 2279 remove_wait_queue(sk_sleep(sk), &wait); 2280 } while (!done); 2281 return 0; 2282 } 2283 2284 /** 2285 * tipc_connect - establish a connection to another TIPC port 2286 * @sock: socket structure 2287 * @dest: socket address for destination port 2288 * @destlen: size of socket address data structure 2289 * @flags: file-related flags associated with socket 2290 * 2291 * Returns 0 on success, errno otherwise 2292 */ 2293 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2294 int destlen, int flags) 2295 { 2296 struct sock *sk = sock->sk; 2297 struct tipc_sock *tsk = tipc_sk(sk); 2298 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2299 struct msghdr m = {NULL,}; 2300 long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; 2301 int previous; 2302 int res = 0; 2303 2304 if (destlen != sizeof(struct sockaddr_tipc)) 2305 return -EINVAL; 2306 2307 lock_sock(sk); 2308 2309 if (tsk->group) { 2310 res = -EINVAL; 2311 goto exit; 2312 } 2313 2314 if (dst->family == AF_UNSPEC) { 2315 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2316 if (!tipc_sk_type_connectionless(sk)) 2317 res = -EINVAL; 2318 goto exit; 2319 } else if (dst->family != AF_TIPC) { 2320 res = -EINVAL; 2321 } 2322 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2323 res = -EINVAL; 2324 if (res) 2325 goto exit; 2326 2327 /* DGRAM/RDM connect(), just save the destaddr */ 2328 if (tipc_sk_type_connectionless(sk)) { 2329 memcpy(&tsk->peer, dest, destlen); 2330 goto exit; 2331 } 2332 2333 previous = sk->sk_state; 2334 2335 switch (sk->sk_state) { 2336 case TIPC_OPEN: 2337 /* Send a 'SYN-' to destination */ 2338 m.msg_name = dest; 2339 m.msg_namelen = destlen; 2340 2341 /* For a non-blocking connect, set MSG_DONTWAIT so that 2342 * __tipc_sendmsg() does not block. 2343 */ 2344 if (!timeout) 2345 m.msg_flags = MSG_DONTWAIT; 2346 2347 res = __tipc_sendmsg(sock, &m, 0); 2348 if ((res < 0) && (res != -EWOULDBLOCK)) 2349 goto exit; 2350 2351 /* Just entered TIPC_CONNECTING state; the only 2352 * difference is that the return value in the non-blocking 2353 * case is EINPROGRESS, rather than EALREADY. 2354 */ 2355 res = -EINPROGRESS; 2356 /* fall thru' */ 2357 case TIPC_CONNECTING: 2358 if (!timeout) { 2359 if (previous == TIPC_CONNECTING) 2360 res = -EALREADY; 2361 goto exit; 2362 } 2363 timeout = msecs_to_jiffies(timeout); 2364 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2365 res = tipc_wait_for_connect(sock, &timeout); 2366 break; 2367 case TIPC_ESTABLISHED: 2368 res = -EISCONN; 2369 break; 2370 default: 2371 res = -EINVAL; 2372 } 2373 2374 exit: 2375 release_sock(sk); 2376 return res; 2377 } 2378 2379 /** 2380 * tipc_listen - allow socket to listen for incoming connections 2381 * @sock: socket structure 2382 * @len: (unused) 2383 * 2384 * Returns 0 on success, errno otherwise 2385 */ 2386 static int tipc_listen(struct socket *sock, int len) 2387 { 2388 struct sock *sk = sock->sk; 2389 int res; 2390 2391 lock_sock(sk); 2392 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2393 release_sock(sk); 2394 2395 return res; 2396 } 2397 2398 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2399 { 2400 struct sock *sk = sock->sk; 2401 DEFINE_WAIT(wait); 2402 int err; 2403 2404 /* True wake-one mechanism for incoming connections: only 2405 * one process gets woken up, not the 'whole herd'. 2406 * Since we do not 'race & poll' for established sockets 2407 * anymore, the common case will execute the loop only once.
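 * (prepare_to_wait_exclusive() queues the waiter with WQ_FLAG_EXCLUSIVE,
 * so a wake-up stops after waking the first such waiter.)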
2408 */ 2409 for (;;) { 2410 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2411 TASK_INTERRUPTIBLE); 2412 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2413 release_sock(sk); 2414 timeo = schedule_timeout(timeo); 2415 lock_sock(sk); 2416 } 2417 err = 0; 2418 if (!skb_queue_empty(&sk->sk_receive_queue)) 2419 break; 2420 err = -EAGAIN; 2421 if (!timeo) 2422 break; 2423 err = sock_intr_errno(timeo); 2424 if (signal_pending(current)) 2425 break; 2426 } 2427 finish_wait(sk_sleep(sk), &wait); 2428 return err; 2429 } 2430 2431 /** 2432 * tipc_accept - wait for connection request 2433 * @sock: listening socket 2434 * @new_sock: new socket that is to be connected 2435 * @flags: file-related flags associated with socket 2436 * 2437 * Returns 0 on success, errno otherwise 2438 */ 2439 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2440 bool kern) 2441 { 2442 struct sock *new_sk, *sk = sock->sk; 2443 struct sk_buff *buf; 2444 struct tipc_sock *new_tsock; 2445 struct tipc_msg *msg; 2446 long timeo; 2447 int res; 2448 2449 lock_sock(sk); 2450 2451 if (sk->sk_state != TIPC_LISTEN) { 2452 res = -EINVAL; 2453 goto exit; 2454 } 2455 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2456 res = tipc_wait_for_accept(sock, timeo); 2457 if (res) 2458 goto exit; 2459 2460 buf = skb_peek(&sk->sk_receive_queue); 2461 2462 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2463 if (res) 2464 goto exit; 2465 security_sk_clone(sock->sk, new_sock->sk); 2466 2467 new_sk = new_sock->sk; 2468 new_tsock = tipc_sk(new_sk); 2469 msg = buf_msg(buf); 2470 2471 /* we lock on new_sk; but lockdep sees the lock on sk */ 2472 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2473 2474 /* 2475 * Reject any stray messages received by new socket 2476 * before the socket lock was taken (very, very unlikely) 2477 */ 2478 tsk_rej_rx_queue(new_sk); 2479 2480 /* Connect new socket to its peer */ 2481 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2482 2483 tsk_set_importance(new_tsock, msg_importance(msg)); 2484 if (msg_named(msg)) { 2485 new_tsock->conn_type = msg_nametype(msg); 2486 new_tsock->conn_instance = msg_nameinst(msg); 2487 } 2488 2489 /* 2490 * Respond to 'SYN-' by discarding it & returning 'ACK-'. 2491 * Respond to 'SYN+' by queuing it on new socket. 2492 */ 2493 if (!msg_data_sz(msg)) { 2494 struct msghdr m = {NULL,}; 2495 2496 tsk_advance_rx_queue(sk); 2497 __tipc_sendstream(new_sock, &m, 0); 2498 } else { 2499 __skb_dequeue(&sk->sk_receive_queue); 2500 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2501 skb_set_owner_r(buf, new_sk); 2502 } 2503 release_sock(new_sk); 2504 exit: 2505 release_sock(sk); 2506 return res; 2507 } 2508 2509 /** 2510 * tipc_shutdown - shutdown socket connection 2511 * @sock: socket structure 2512 * @how: direction to close (must be SHUT_RDWR) 2513 * 2514 * Terminates connection (if necessary), then purges socket's receive queue.
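 *
 * A user-space sketch (illustrative only; 'sd' is assumed to be a
 * connected TIPC socket; note from the check below that only SHUT_RDWR
 * is accepted):
 *
 *	if (shutdown(sd, SHUT_RDWR) < 0)
 *		perror("shutdown");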
2515 * 2516 * Returns 0 on success, errno otherwise 2517 */ 2518 static int tipc_shutdown(struct socket *sock, int how) 2519 { 2520 struct sock *sk = sock->sk; 2521 int res; 2522 2523 if (how != SHUT_RDWR) 2524 return -EINVAL; 2525 2526 lock_sock(sk); 2527 2528 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2529 sk->sk_shutdown = SEND_SHUTDOWN; 2530 2531 if (sk->sk_state == TIPC_DISCONNECTING) { 2532 /* Discard any unreceived messages */ 2533 __skb_queue_purge(&sk->sk_receive_queue); 2534 2535 /* Wake up anyone sleeping in poll */ 2536 sk->sk_state_change(sk); 2537 res = 0; 2538 } else { 2539 res = -ENOTCONN; 2540 } 2541 2542 release_sock(sk); 2543 return res; 2544 } 2545 2546 static void tipc_sk_timeout(struct timer_list *t) 2547 { 2548 struct sock *sk = from_timer(sk, t, sk_timer); 2549 struct tipc_sock *tsk = tipc_sk(sk); 2550 u32 peer_port = tsk_peer_port(tsk); 2551 u32 peer_node = tsk_peer_node(tsk); 2552 u32 own_node = tsk_own_node(tsk); 2553 u32 own_port = tsk->portid; 2554 struct net *net = sock_net(sk); 2555 struct sk_buff *skb = NULL; 2556 2557 bh_lock_sock(sk); 2558 if (!tipc_sk_connected(sk)) 2559 goto exit; 2560 2561 /* Try again later if socket is busy */ 2562 if (sock_owned_by_user(sk)) { 2563 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2564 goto exit; 2565 } 2566 2567 if (tsk->probe_unacked) { 2568 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2569 tipc_node_remove_conn(net, peer_node, peer_port); 2570 sk->sk_state_change(sk); 2571 goto exit; 2572 } 2573 /* Send new probe */ 2574 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2575 peer_node, own_node, peer_port, own_port, 2576 TIPC_OK); 2577 tsk->probe_unacked = true; 2578 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2579 exit: 2580 bh_unlock_sock(sk); 2581 if (skb) 2582 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2583 sock_put(sk); 2584 } 2585 2586 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2587 struct tipc_name_seq const *seq) 2588 { 2589 struct sock *sk = &tsk->sk; 2590 struct net *net = sock_net(sk); 2591 struct publication *publ; 2592 u32 key; 2593 2594 if (scope != TIPC_NODE_SCOPE) 2595 scope = TIPC_CLUSTER_SCOPE; 2596 2597 if (tipc_sk_connected(sk)) 2598 return -EINVAL; 2599 key = tsk->portid + tsk->pub_count + 1; 2600 if (key == tsk->portid) 2601 return -EADDRINUSE; 2602 2603 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2604 scope, tsk->portid, key); 2605 if (unlikely(!publ)) 2606 return -EINVAL; 2607 2608 list_add(&publ->binding_sock, &tsk->publications); 2609 tsk->pub_count++; 2610 tsk->published = 1; 2611 return 0; 2612 } 2613 2614 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2615 struct tipc_name_seq const *seq) 2616 { 2617 struct net *net = sock_net(&tsk->sk); 2618 struct publication *publ; 2619 struct publication *safe; 2620 int rc = -EINVAL; 2621 2622 if (scope != TIPC_NODE_SCOPE) 2623 scope = TIPC_CLUSTER_SCOPE; 2624 2625 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2626 if (seq) { 2627 if (publ->scope != scope) 2628 continue; 2629 if (publ->type != seq->type) 2630 continue; 2631 if (publ->lower != seq->lower) 2632 continue; 2633 if (publ->upper != seq->upper) 2634 break; 2635 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2636 publ->upper, publ->key); 2637 rc = 0; 2638 break; 2639 } 2640 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2641 publ->upper, publ->key); 2642 rc = 0; 2643 } 2644 if (list_empty(&tsk->publications)) 2645 tsk->published = 0; 2646 return rc; 
2647 } 2648 2649 /* tipc_sk_reinit: set non-zero address in all existing sockets 2650 * when we go from standalone to network mode. 2651 */ 2652 void tipc_sk_reinit(struct net *net) 2653 { 2654 struct tipc_net *tn = net_generic(net, tipc_net_id); 2655 struct rhashtable_iter iter; 2656 struct tipc_sock *tsk; 2657 struct tipc_msg *msg; 2658 2659 rhashtable_walk_enter(&tn->sk_rht, &iter); 2660 2661 do { 2662 rhashtable_walk_start(&iter); 2663 2664 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2665 spin_lock_bh(&tsk->sk.sk_lock.slock); 2666 msg = &tsk->phdr; 2667 msg_set_prevnode(msg, tipc_own_addr(net)); 2668 msg_set_orignode(msg, tipc_own_addr(net)); 2669 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2670 } 2671 2672 rhashtable_walk_stop(&iter); 2673 } while (tsk == ERR_PTR(-EAGAIN)); 2674 } 2675 2676 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2677 { 2678 struct tipc_net *tn = net_generic(net, tipc_net_id); 2679 struct tipc_sock *tsk; 2680 2681 rcu_read_lock(); 2682 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2683 if (tsk) 2684 sock_hold(&tsk->sk); 2685 rcu_read_unlock(); 2686 2687 return tsk; 2688 } 2689 2690 static int tipc_sk_insert(struct tipc_sock *tsk) 2691 { 2692 struct sock *sk = &tsk->sk; 2693 struct net *net = sock_net(sk); 2694 struct tipc_net *tn = net_generic(net, tipc_net_id); 2695 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2696 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2697 2698 while (remaining--) { 2699 portid++; 2700 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2701 portid = TIPC_MIN_PORT; 2702 tsk->portid = portid; 2703 sock_hold(&tsk->sk); 2704 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2705 tsk_rht_params)) 2706 return 0; 2707 sock_put(&tsk->sk); 2708 } 2709 2710 return -1; 2711 } 2712 2713 static void tipc_sk_remove(struct tipc_sock *tsk) 2714 { 2715 struct sock *sk = &tsk->sk; 2716 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2717 2718 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2719 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2720 __sock_put(sk); 2721 } 2722 } 2723 2724 static const struct rhashtable_params tsk_rht_params = { 2725 .nelem_hint = 192, 2726 .head_offset = offsetof(struct tipc_sock, node), 2727 .key_offset = offsetof(struct tipc_sock, portid), 2728 .key_len = sizeof(u32), /* portid */ 2729 .max_size = 1048576, 2730 .min_size = 256, 2731 .automatic_shrinking = true, 2732 }; 2733 2734 int tipc_sk_rht_init(struct net *net) 2735 { 2736 struct tipc_net *tn = net_generic(net, tipc_net_id); 2737 2738 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2739 } 2740 2741 void tipc_sk_rht_destroy(struct net *net) 2742 { 2743 struct tipc_net *tn = net_generic(net, tipc_net_id); 2744 2745 /* Wait for socket readers to complete */ 2746 synchronize_net(); 2747 2748 rhashtable_destroy(&tn->sk_rht); 2749 } 2750 2751 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2752 { 2753 struct net *net = sock_net(&tsk->sk); 2754 struct tipc_group *grp = tsk->group; 2755 struct tipc_msg *hdr = &tsk->phdr; 2756 struct tipc_name_seq seq; 2757 int rc; 2758 2759 if (mreq->type < TIPC_RESERVED_TYPES) 2760 return -EACCES; 2761 if (mreq->scope > TIPC_NODE_SCOPE) 2762 return -EINVAL; 2763 if (grp) 2764 return -EACCES; 2765 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2766 if (!grp) 2767 return -ENOMEM; 2768 tsk->group = grp; 2769 msg_set_lookup_scope(hdr, mreq->scope); 2770 
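	/* Stamp the group's identity into the preformatted header used
	 * for all messages sent from this socket
	 */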
msg_set_nametype(hdr, mreq->type); 2771 msg_set_dest_droppable(hdr, true); 2772 seq.type = mreq->type; 2773 seq.lower = mreq->instance; 2774 seq.upper = seq.lower; 2775 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2776 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2777 if (rc) { 2778 tipc_group_delete(net, grp); 2779 tsk->group = NULL; 2780 return rc; 2781 } 2782 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2783 tsk->mc_method.rcast = true; 2784 tsk->mc_method.mandatory = true; 2785 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2786 return rc; 2787 } 2788 2789 static int tipc_sk_leave(struct tipc_sock *tsk) 2790 { 2791 struct net *net = sock_net(&tsk->sk); 2792 struct tipc_group *grp = tsk->group; 2793 struct tipc_name_seq seq; 2794 int scope; 2795 2796 if (!grp) 2797 return -EINVAL; 2798 tipc_group_self(grp, &seq, &scope); 2799 tipc_group_delete(net, grp); 2800 tsk->group = NULL; 2801 tipc_sk_withdraw(tsk, scope, &seq); 2802 return 0; 2803 } 2804 2805 /** 2806 * tipc_setsockopt - set socket option 2807 * @sock: socket structure 2808 * @lvl: option level 2809 * @opt: option identifier 2810 * @ov: pointer to new option value 2811 * @ol: length of option value 2812 * 2813 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2814 * (to ease compatibility). 2815 * 2816 * Returns 0 on success, errno otherwise 2817 */ 2818 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2819 char __user *ov, unsigned int ol) 2820 { 2821 struct sock *sk = sock->sk; 2822 struct tipc_sock *tsk = tipc_sk(sk); 2823 struct tipc_group_req mreq; 2824 u32 value = 0; 2825 int res = 0; 2826 2827 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2828 return 0; 2829 if (lvl != SOL_TIPC) 2830 return -ENOPROTOOPT; 2831 2832 switch (opt) { 2833 case TIPC_IMPORTANCE: 2834 case TIPC_SRC_DROPPABLE: 2835 case TIPC_DEST_DROPPABLE: 2836 case TIPC_CONN_TIMEOUT: 2837 if (ol < sizeof(value)) 2838 return -EINVAL; 2839 if (get_user(value, (u32 __user *)ov)) 2840 return -EFAULT; 2841 break; 2842 case TIPC_GROUP_JOIN: 2843 if (ol < sizeof(mreq)) 2844 return -EINVAL; 2845 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2846 return -EFAULT; 2847 break; 2848 default: 2849 if (ov || ol) 2850 return -EINVAL; 2851 } 2852 2853 lock_sock(sk); 2854 2855 switch (opt) { 2856 case TIPC_IMPORTANCE: 2857 res = tsk_set_importance(tsk, value); 2858 break; 2859 case TIPC_SRC_DROPPABLE: 2860 if (sock->type != SOCK_STREAM) 2861 tsk_set_unreliable(tsk, value); 2862 else 2863 res = -ENOPROTOOPT; 2864 break; 2865 case TIPC_DEST_DROPPABLE: 2866 tsk_set_unreturnable(tsk, value); 2867 break; 2868 case TIPC_CONN_TIMEOUT: 2869 tipc_sk(sk)->conn_timeout = value; 2870 break; 2871 case TIPC_MCAST_BROADCAST: 2872 tsk->mc_method.rcast = false; 2873 tsk->mc_method.mandatory = true; 2874 break; 2875 case TIPC_MCAST_REPLICAST: 2876 tsk->mc_method.rcast = true; 2877 tsk->mc_method.mandatory = true; 2878 break; 2879 case TIPC_GROUP_JOIN: 2880 res = tipc_sk_join(tsk, &mreq); 2881 break; 2882 case TIPC_GROUP_LEAVE: 2883 res = tipc_sk_leave(tsk); 2884 break; 2885 default: 2886 res = -EINVAL; 2887 } 2888 2889 release_sock(sk); 2890 2891 return res; 2892 } 2893 2894 /** 2895 * tipc_getsockopt - get socket option 2896 * @sock: socket structure 2897 * @lvl: option level 2898 * @opt: option identifier 2899 * @ov: receptacle for option value 2900 * @ol: receptacle for length of option value 2901 * 2902 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2903 * (to ease compatibility). 
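 *
 * A user-space sketch of the option interface (illustrative only; 'sd'
 * is assumed to be a TIPC socket):
 *
 *	__u32 ms = 5000;
 *	socklen_t len = sizeof(ms);
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, sizeof(ms));
 *	getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, &len);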
2904 * 2905 * Returns 0 on success, errno otherwise 2906 */ 2907 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2908 char __user *ov, int __user *ol) 2909 { 2910 struct sock *sk = sock->sk; 2911 struct tipc_sock *tsk = tipc_sk(sk); 2912 struct tipc_name_seq seq; 2913 int len, scope; 2914 u32 value; 2915 int res; 2916 2917 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2918 return put_user(0, ol); 2919 if (lvl != SOL_TIPC) 2920 return -ENOPROTOOPT; 2921 res = get_user(len, ol); 2922 if (res) 2923 return res; 2924 2925 lock_sock(sk); 2926 2927 switch (opt) { 2928 case TIPC_IMPORTANCE: 2929 value = tsk_importance(tsk); 2930 break; 2931 case TIPC_SRC_DROPPABLE: 2932 value = tsk_unreliable(tsk); 2933 break; 2934 case TIPC_DEST_DROPPABLE: 2935 value = tsk_unreturnable(tsk); 2936 break; 2937 case TIPC_CONN_TIMEOUT: 2938 value = tsk->conn_timeout; 2939 /* no need to set "res", since already 0 at this point */ 2940 break; 2941 case TIPC_NODE_RECVQ_DEPTH: 2942 value = 0; /* was tipc_queue_size, now obsolete */ 2943 break; 2944 case TIPC_SOCK_RECVQ_DEPTH: 2945 value = skb_queue_len(&sk->sk_receive_queue); 2946 break; 2947 case TIPC_GROUP_JOIN: 2948 seq.type = 0; 2949 if (tsk->group) 2950 tipc_group_self(tsk->group, &seq, &scope); 2951 value = seq.type; 2952 break; 2953 default: 2954 res = -EINVAL; 2955 } 2956 2957 release_sock(sk); 2958 2959 if (res) 2960 return res; /* "get" failed */ 2961 2962 if (len < sizeof(value)) 2963 return -EINVAL; 2964 2965 if (copy_to_user(ov, &value, sizeof(value))) 2966 return -EFAULT; 2967 2968 return put_user(sizeof(value), ol); 2969 } 2970 2971 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2972 { 2973 struct net *net = sock_net(sock->sk); 2974 struct tipc_sioc_nodeid_req nr = {0}; 2975 struct tipc_sioc_ln_req lnr; 2976 void __user *argp = (void __user *)arg; 2977 2978 switch (cmd) { 2979 case SIOCGETLINKNAME: 2980 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2981 return -EFAULT; 2982 if (!tipc_node_get_linkname(net, 2983 lnr.bearer_id & 0xffff, lnr.peer, 2984 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2985 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2986 return -EFAULT; 2987 return 0; 2988 } 2989 return -EADDRNOTAVAIL; 2990 case SIOCGETNODEID: 2991 if (copy_from_user(&nr, argp, sizeof(nr))) 2992 return -EFAULT; 2993 if (!tipc_node_get_id(net, nr.peer, nr.node_id)) 2994 return -EADDRNOTAVAIL; 2995 if (copy_to_user(argp, &nr, sizeof(nr))) 2996 return -EFAULT; 2997 return 0; 2998 default: 2999 return -ENOIOCTLCMD; 3000 } 3001 } 3002 3003 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 3004 { 3005 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3006 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3007 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3008 3009 tsk1->peer.family = AF_TIPC; 3010 tsk1->peer.addrtype = TIPC_ADDR_ID; 3011 tsk1->peer.scope = TIPC_NODE_SCOPE; 3012 tsk1->peer.addr.id.ref = tsk2->portid; 3013 tsk1->peer.addr.id.node = onode; 3014 tsk2->peer.family = AF_TIPC; 3015 tsk2->peer.addrtype = TIPC_ADDR_ID; 3016 tsk2->peer.scope = TIPC_NODE_SCOPE; 3017 tsk2->peer.addr.id.ref = tsk1->portid; 3018 tsk2->peer.addr.id.node = onode; 3019 3020 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3021 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3022 return 0; 3023 } 3024 3025 /* Protocol switches for the various types of TIPC sockets */ 3026 3027 static const struct proto_ops msg_ops = { 3028 .owner = THIS_MODULE, 3029 .family = AF_TIPC, 3030 .release = tipc_release, 3031 .bind = tipc_bind, 
3032 .connect = tipc_connect, 3033 .socketpair = tipc_socketpair, 3034 .accept = sock_no_accept, 3035 .getname = tipc_getname, 3036 .poll_mask = tipc_poll_mask, 3037 .ioctl = tipc_ioctl, 3038 .listen = sock_no_listen, 3039 .shutdown = tipc_shutdown, 3040 .setsockopt = tipc_setsockopt, 3041 .getsockopt = tipc_getsockopt, 3042 .sendmsg = tipc_sendmsg, 3043 .recvmsg = tipc_recvmsg, 3044 .mmap = sock_no_mmap, 3045 .sendpage = sock_no_sendpage 3046 }; 3047 3048 static const struct proto_ops packet_ops = { 3049 .owner = THIS_MODULE, 3050 .family = AF_TIPC, 3051 .release = tipc_release, 3052 .bind = tipc_bind, 3053 .connect = tipc_connect, 3054 .socketpair = tipc_socketpair, 3055 .accept = tipc_accept, 3056 .getname = tipc_getname, 3057 .poll_mask = tipc_poll_mask, 3058 .ioctl = tipc_ioctl, 3059 .listen = tipc_listen, 3060 .shutdown = tipc_shutdown, 3061 .setsockopt = tipc_setsockopt, 3062 .getsockopt = tipc_getsockopt, 3063 .sendmsg = tipc_send_packet, 3064 .recvmsg = tipc_recvmsg, 3065 .mmap = sock_no_mmap, 3066 .sendpage = sock_no_sendpage 3067 }; 3068 3069 static const struct proto_ops stream_ops = { 3070 .owner = THIS_MODULE, 3071 .family = AF_TIPC, 3072 .release = tipc_release, 3073 .bind = tipc_bind, 3074 .connect = tipc_connect, 3075 .socketpair = tipc_socketpair, 3076 .accept = tipc_accept, 3077 .getname = tipc_getname, 3078 .poll_mask = tipc_poll_mask, 3079 .ioctl = tipc_ioctl, 3080 .listen = tipc_listen, 3081 .shutdown = tipc_shutdown, 3082 .setsockopt = tipc_setsockopt, 3083 .getsockopt = tipc_getsockopt, 3084 .sendmsg = tipc_sendstream, 3085 .recvmsg = tipc_recvstream, 3086 .mmap = sock_no_mmap, 3087 .sendpage = sock_no_sendpage 3088 }; 3089 3090 static const struct net_proto_family tipc_family_ops = { 3091 .owner = THIS_MODULE, 3092 .family = AF_TIPC, 3093 .create = tipc_sk_create 3094 }; 3095 3096 static struct proto tipc_proto = { 3097 .name = "TIPC", 3098 .owner = THIS_MODULE, 3099 .obj_size = sizeof(struct tipc_sock), 3100 .sysctl_rmem = sysctl_tipc_rmem 3101 }; 3102 3103 /** 3104 * tipc_socket_init - initialize TIPC socket interface 3105 * 3106 * Returns 0 on success, errno otherwise 3107 */ 3108 int tipc_socket_init(void) 3109 { 3110 int res; 3111 3112 res = proto_register(&tipc_proto, 1); 3113 if (res) { 3114 pr_err("Failed to register TIPC protocol type\n"); 3115 goto out; 3116 } 3117 3118 res = sock_register(&tipc_family_ops); 3119 if (res) { 3120 pr_err("Failed to register TIPC socket type\n"); 3121 proto_unregister(&tipc_proto); 3122 goto out; 3123 } 3124 out: 3125 return res; 3126 } 3127 3128 /** 3129 * tipc_socket_stop - stop TIPC socket interface 3130 */ 3131 void tipc_socket_stop(void) 3132 { 3133 sock_unregister(tipc_family_ops.family); 3134 proto_unregister(&tipc_proto); 3135 } 3136 3137 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3138 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3139 { 3140 u32 peer_node; 3141 u32 peer_port; 3142 struct nlattr *nest; 3143 3144 peer_node = tsk_peer_node(tsk); 3145 peer_port = tsk_peer_port(tsk); 3146 3147 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3148 3149 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3150 goto msg_full; 3151 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3152 goto msg_full; 3153 3154 if (tsk->conn_type != 0) { 3155 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3156 goto msg_full; 3157 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3158 goto msg_full; 3159 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3160 goto msg_full; 3161 } 3162 nla_nest_end(skb, nest); 3163 3164 return 0; 3165 3166 msg_full: 3167 nla_nest_cancel(skb, nest); 3168 3169 return -EMSGSIZE; 3170 } 3171 3172 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3173 *tsk) 3174 { 3175 struct net *net = sock_net(skb->sk); 3176 struct sock *sk = &tsk->sk; 3177 3178 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3179 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3180 return -EMSGSIZE; 3181 3182 if (tipc_sk_connected(sk)) { 3183 if (__tipc_nl_add_sk_con(skb, tsk)) 3184 return -EMSGSIZE; 3185 } else if (!list_empty(&tsk->publications)) { 3186 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3187 return -EMSGSIZE; 3188 } 3189 return 0; 3190 } 3191 3192 /* Caller should hold socket lock for the passed tipc socket. */ 3193 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3194 struct tipc_sock *tsk) 3195 { 3196 struct nlattr *attrs; 3197 void *hdr; 3198 3199 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3200 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3201 if (!hdr) 3202 goto msg_cancel; 3203 3204 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3205 if (!attrs) 3206 goto genlmsg_cancel; 3207 3208 if (__tipc_nl_add_sk_info(skb, tsk)) 3209 goto attr_msg_cancel; 3210 3211 nla_nest_end(skb, attrs); 3212 genlmsg_end(skb, hdr); 3213 3214 return 0; 3215 3216 attr_msg_cancel: 3217 nla_nest_cancel(skb, attrs); 3218 genlmsg_cancel: 3219 genlmsg_cancel(skb, hdr); 3220 msg_cancel: 3221 return -EMSGSIZE; 3222 } 3223 3224 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3225 int (*skb_handler)(struct sk_buff *skb, 3226 struct netlink_callback *cb, 3227 struct tipc_sock *tsk)) 3228 { 3229 struct net *net = sock_net(skb->sk); 3230 struct tipc_net *tn = tipc_net(net); 3231 const struct bucket_table *tbl; 3232 u32 prev_portid = cb->args[1]; 3233 u32 tbl_id = cb->args[0]; 3234 struct rhash_head *pos; 3235 struct tipc_sock *tsk; 3236 int err; 3237 3238 rcu_read_lock(); 3239 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 3240 for (; tbl_id < tbl->size; tbl_id++) { 3241 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 3242 spin_lock_bh(&tsk->sk.sk_lock.slock); 3243 if (prev_portid && prev_portid != tsk->portid) { 3244 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3245 continue; 3246 } 3247 3248 err = skb_handler(skb, cb, tsk); 3249 if (err) { 3250 prev_portid = tsk->portid; 3251 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3252 goto out; 3253 } 3254 3255 prev_portid = 0; 3256 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3257 } 3258 } 3259 out: 3260 rcu_read_unlock(); 3261 cb->args[0] = tbl_id; 3262 cb->args[1] = prev_portid; 3263 3264 return skb->len; 3265 } 3266 EXPORT_SYMBOL(tipc_nl_sk_walk); 3267 3268 int tipc_sk_fill_sock_diag(struct sk_buff 
*skb, struct netlink_callback *cb, 3269 struct tipc_sock *tsk, u32 sk_filter_state, 3270 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3271 { 3272 struct sock *sk = &tsk->sk; 3273 struct nlattr *attrs; 3274 struct nlattr *stat; 3275 3276 /* filter response w.r.t. sk_state */ 3277 if (!(sk_filter_state & (1 << sk->sk_state))) 3278 return 0; 3279 3280 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3281 if (!attrs) 3282 goto msg_cancel; 3283 3284 if (__tipc_nl_add_sk_info(skb, tsk)) 3285 goto attr_msg_cancel; 3286 3287 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3288 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3289 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3290 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3291 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3292 sock_i_uid(sk))) || 3293 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3294 tipc_diag_gen_cookie(sk), 3295 TIPC_NLA_SOCK_PAD)) 3296 goto attr_msg_cancel; 3297 3298 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3299 if (!stat) 3300 goto attr_msg_cancel; 3301 3302 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3303 skb_queue_len(&sk->sk_receive_queue)) || 3304 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3305 skb_queue_len(&sk->sk_write_queue)) || 3306 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3307 atomic_read(&sk->sk_drops))) 3308 goto stat_msg_cancel; 3309 3310 if (tsk->cong_link_cnt && 3311 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3312 goto stat_msg_cancel; 3313 3314 if (tsk_conn_cong(tsk) && 3315 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3316 goto stat_msg_cancel; 3317 3318 nla_nest_end(skb, stat); 3319 nla_nest_end(skb, attrs); 3320 3321 return 0; 3322 3323 stat_msg_cancel: 3324 nla_nest_cancel(skb, stat); 3325 attr_msg_cancel: 3326 nla_nest_cancel(skb, attrs); 3327 msg_cancel: 3328 return -EMSGSIZE; 3329 } 3330 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3331 3332 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3333 { 3334 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3335 } 3336 3337 /* Caller should hold socket lock for the passed tipc socket.
*/ 3377 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3378 struct netlink_callback *cb, 3379 struct tipc_sock *tsk, u32 *last_publ) 3380 { 3381 int err; 3382 struct publication *p; 3383 3384 if (*last_publ) { 3385 list_for_each_entry(p, &tsk->publications, binding_sock) { 3386 if (p->key == *last_publ) 3387 break; 3388 } 3389 if (p->key != *last_publ) { 3390 /* We never set seq or call nl_dump_check_consistent(); 3391 * this means that setting prev_seq here will cause the 3392 * consistency check to fail in the netlink callback 3393 * handler, resulting in the last NLMSG_DONE message 3394 * having the NLM_F_DUMP_INTR flag set. 3395 */ 3396 cb->prev_seq = 1; 3397 *last_publ = 0; 3398 return -EPIPE; 3399 } 3400 } else { 3401 p = list_first_entry(&tsk->publications, struct publication, 3402 binding_sock); 3403 } 3404 3405 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3406 err = __tipc_nl_add_sk_publ(skb, cb, p); 3407 if (err) { 3408 *last_publ = p->key; 3409 return err; 3410 } 3411 } 3412 *last_publ = 0; 3413 3414 return 0; 3415 } 3416 3417 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3418 { 3419 int err; 3420 u32 tsk_portid = cb->args[0]; 3421 u32 last_publ = cb->args[1]; 3422 u32 done = cb->args[2]; 3423 struct net *net = sock_net(skb->sk); 3424 struct tipc_sock *tsk; 3425 3426 if (!tsk_portid) { 3427 struct nlattr **attrs; 3428 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3429 3430 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3431 if (err) 3432 return err; 3433 3434 if (!attrs[TIPC_NLA_SOCK]) 3435 return -EINVAL; 3436 3437 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3438 attrs[TIPC_NLA_SOCK], 3439 tipc_nl_sock_policy, NULL); 3440 if (err) 3441 return err; 3442 3443 if (!sock[TIPC_NLA_SOCK_REF]) 3444 return -EINVAL; 3445 3446 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3447 } 3448 3449 if (done) 3450 return 0; 3451 3452 tsk = tipc_sk_lookup(net, tsk_portid); 3453 if (!tsk) 3454 return -EINVAL; 3455 3456 lock_sock(&tsk->sk); 3457 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3458 if (!err) 3459 done = 1; 3460 release_sock(&tsk->sk); 3461 sock_put(&tsk->sk); 3462 3463 cb->args[0] = tsk_portid; 3464 cb->args[1] = last_publ; 3465 cb->args[2] = done; 3466 3467 return skb->len; 3468 } 3469