1 /* SCTP kernel implementation 2 * (C) Copyright IBM Corp. 2001, 2004 3 * Copyright (c) 1999-2000 Cisco, Inc. 4 * Copyright (c) 1999-2001 Motorola, Inc. 5 * Copyright (c) 2001-2003 Intel Corp. 6 * Copyright (c) 2001-2002 Nokia, Inc. 7 * Copyright (c) 2001 La Monte H.P. Yarroll 8 * 9 * This file is part of the SCTP kernel implementation 10 * 11 * These functions interface with the sockets layer to implement the 12 * SCTP Extensions for the Sockets API. 13 * 14 * Note that the descriptions from the specification are USER level 15 * functions--this file is the functions which populate the struct proto 16 * for SCTP which is the BOTTOM of the sockets interface. 17 * 18 * This SCTP implementation is free software; 19 * you can redistribute it and/or modify it under the terms of 20 * the GNU General Public License as published by 21 * the Free Software Foundation; either version 2, or (at your option) 22 * any later version. 23 * 24 * This SCTP implementation is distributed in the hope that it 25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 26 * ************************ 27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 28 * See the GNU General Public License for more details. 29 * 30 * You should have received a copy of the GNU General Public License 31 * along with GNU CC; see the file COPYING. If not, see 32 * <http://www.gnu.org/licenses/>. 33 * 34 * Please send any bug reports or fixes you make to the 35 * email address(es): 36 * lksctp developers <linux-sctp@vger.kernel.org> 37 * 38 * Written or modified by: 39 * La Monte H.P. Yarroll <piggy@acm.org> 40 * Narasimha Budihal <narsi@refcode.org> 41 * Karl Knutson <karl@athena.chicago.il.us> 42 * Jon Grimm <jgrimm@us.ibm.com> 43 * Xingang Guo <xingang.guo@intel.com> 44 * Daisy Chang <daisyc@us.ibm.com> 45 * Sridhar Samudrala <samudrala@us.ibm.com> 46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> 47 * Ardelle Fan <ardelle.fan@intel.com> 48 * Ryan Layer <rmlayer@us.ibm.com> 49 * Anup Pemmaiah <pemmaiah@cc.usu.edu> 50 * Kevin Gao <kevin.gao@intel.com> 51 */ 52 53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 54 55 #include <linux/types.h> 56 #include <linux/kernel.h> 57 #include <linux/wait.h> 58 #include <linux/time.h> 59 #include <linux/ip.h> 60 #include <linux/capability.h> 61 #include <linux/fcntl.h> 62 #include <linux/poll.h> 63 #include <linux/init.h> 64 #include <linux/crypto.h> 65 #include <linux/slab.h> 66 #include <linux/file.h> 67 #include <linux/compat.h> 68 69 #include <net/ip.h> 70 #include <net/icmp.h> 71 #include <net/route.h> 72 #include <net/ipv6.h> 73 #include <net/inet_common.h> 74 #include <net/busy_poll.h> 75 76 #include <linux/socket.h> /* for sa_family_t */ 77 #include <linux/export.h> 78 #include <net/sock.h> 79 #include <net/sctp/sctp.h> 80 #include <net/sctp/sm.h> 81 82 /* Forward declarations for internal helper functions. */ 83 static int sctp_writeable(struct sock *sk); 84 static void sctp_wfree(struct sk_buff *skb); 85 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, 86 size_t msg_len); 87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); 88 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); 89 static int sctp_wait_for_accept(struct sock *sk, long timeo); 90 static void sctp_wait_for_close(struct sock *sk, long timeo); 91 static void sctp_destruct_sock(struct sock *sk); 92 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 93 union sctp_addr *addr, int len); 94 static int sctp_bindx_add(struct sock *, struct sockaddr *, int); 95 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); 96 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); 97 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); 98 static int sctp_send_asconf(struct sctp_association *asoc, 99 struct sctp_chunk *chunk); 100 static int sctp_do_bind(struct sock *, union sctp_addr *, int); 101 static int sctp_autobind(struct sock *sk); 102 static void sctp_sock_migrate(struct sock *, struct sock *, 103 struct sctp_association *, sctp_socket_type_t); 104 105 extern struct kmem_cache *sctp_bucket_cachep; 106 extern long sysctl_sctp_mem[3]; 107 extern int sysctl_sctp_rmem[3]; 108 extern int sysctl_sctp_wmem[3]; 109 110 static int sctp_memory_pressure; 111 static atomic_long_t sctp_memory_allocated; 112 struct percpu_counter sctp_sockets_allocated; 113 114 static void sctp_enter_memory_pressure(struct sock *sk) 115 { 116 sctp_memory_pressure = 1; 117 } 118 119 120 /* Get the sndbuf space available at the time on the association. */ 121 static inline int sctp_wspace(struct sctp_association *asoc) 122 { 123 int amt; 124 125 if (asoc->ep->sndbuf_policy) 126 amt = asoc->sndbuf_used; 127 else 128 amt = sk_wmem_alloc_get(asoc->base.sk); 129 130 if (amt >= asoc->base.sk->sk_sndbuf) { 131 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) 132 amt = 0; 133 else { 134 amt = sk_stream_wspace(asoc->base.sk); 135 if (amt < 0) 136 amt = 0; 137 } 138 } else { 139 amt = asoc->base.sk->sk_sndbuf - amt; 140 } 141 return amt; 142 } 143 144 /* Increment the used sndbuf space count of the corresponding association by 145 * the size of the outgoing data chunk. 146 * Also, set the skb destructor for sndbuf accounting later. 147 * 148 * Since it is always 1-1 between chunk and skb, and also a new skb is always 149 * allocated for chunk bundling in sctp_packet_transmit(), we can use the 150 * destructor in the data chunk skb for the purpose of the sndbuf space 151 * tracking. 152 */ 153 static inline void sctp_set_owner_w(struct sctp_chunk *chunk) 154 { 155 struct sctp_association *asoc = chunk->asoc; 156 struct sock *sk = asoc->base.sk; 157 158 /* The sndbuf space is tracked per association. */ 159 sctp_association_hold(asoc); 160 161 skb_set_owner_w(chunk->skb, sk); 162 163 chunk->skb->destructor = sctp_wfree; 164 /* Save the chunk pointer in skb for sctp_wfree to use later. */ 165 *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; 166 167 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + 168 sizeof(struct sk_buff) + 169 sizeof(struct sctp_chunk); 170 171 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 172 sk->sk_wmem_queued += chunk->skb->truesize; 173 sk_mem_charge(sk, chunk->skb->truesize); 174 } 175 176 /* Verify that this is a valid address. */ 177 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 178 int len) 179 { 180 struct sctp_af *af; 181 182 /* Verify basic sockaddr. */ 183 af = sctp_sockaddr_af(sctp_sk(sk), addr, len); 184 if (!af) 185 return -EINVAL; 186 187 /* Is this a valid SCTP address? */ 188 if (!af->addr_valid(addr, sctp_sk(sk), NULL)) 189 return -EINVAL; 190 191 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) 192 return -EINVAL; 193 194 return 0; 195 } 196 197 /* Look up the association by its id. If this is not a UDP-style 198 * socket, the ID field is always ignored. 199 */ 200 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) 201 { 202 struct sctp_association *asoc = NULL; 203 204 /* If this is not a UDP-style socket, assoc id should be ignored. */ 205 if (!sctp_style(sk, UDP)) { 206 /* Return NULL if the socket state is not ESTABLISHED. It 207 * could be a TCP-style listening socket or a socket which 208 * hasn't yet called connect() to establish an association. 209 */ 210 if (!sctp_sstate(sk, ESTABLISHED)) 211 return NULL; 212 213 /* Get the first and the only association from the list. */ 214 if (!list_empty(&sctp_sk(sk)->ep->asocs)) 215 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, 216 struct sctp_association, asocs); 217 return asoc; 218 } 219 220 /* Otherwise this is a UDP-style socket. */ 221 if (!id || (id == (sctp_assoc_t)-1)) 222 return NULL; 223 224 spin_lock_bh(&sctp_assocs_id_lock); 225 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); 226 spin_unlock_bh(&sctp_assocs_id_lock); 227 228 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) 229 return NULL; 230 231 return asoc; 232 } 233 234 /* Look up the transport from an address and an assoc id. If both address and 235 * id are specified, the associations matching the address and the id should be 236 * the same. 237 */ 238 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, 239 struct sockaddr_storage *addr, 240 sctp_assoc_t id) 241 { 242 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 243 struct sctp_transport *transport; 244 union sctp_addr *laddr = (union sctp_addr *)addr; 245 246 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 247 laddr, 248 &transport); 249 250 if (!addr_asoc) 251 return NULL; 252 253 id_asoc = sctp_id2assoc(sk, id); 254 if (id_asoc && (id_asoc != addr_asoc)) 255 return NULL; 256 257 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 258 (union sctp_addr *)addr); 259 260 return transport; 261 } 262 263 /* API 3.1.2 bind() - UDP Style Syntax 264 * The syntax of bind() is, 265 * 266 * ret = bind(int sd, struct sockaddr *addr, int addrlen); 267 * 268 * sd - the socket descriptor returned by socket(). 269 * addr - the address structure (struct sockaddr_in or struct 270 * sockaddr_in6 [RFC 2553]), 271 * addr_len - the size of the address structure. 272 */ 273 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) 274 { 275 int retval = 0; 276 277 lock_sock(sk); 278 279 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, 280 addr, addr_len); 281 282 /* Disallow binding twice. */ 283 if (!sctp_sk(sk)->ep->base.bind_addr.port) 284 retval = sctp_do_bind(sk, (union sctp_addr *)addr, 285 addr_len); 286 else 287 retval = -EINVAL; 288 289 release_sock(sk); 290 291 return retval; 292 } 293 294 static long sctp_get_port_local(struct sock *, union sctp_addr *); 295 296 /* Verify this is a valid sockaddr. */ 297 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 298 union sctp_addr *addr, int len) 299 { 300 struct sctp_af *af; 301 302 /* Check minimum size. */ 303 if (len < sizeof (struct sockaddr)) 304 return NULL; 305 306 /* V4 mapped address are really of AF_INET family */ 307 if (addr->sa.sa_family == AF_INET6 && 308 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { 309 if (!opt->pf->af_supported(AF_INET, opt)) 310 return NULL; 311 } else { 312 /* Does this PF support this AF? */ 313 if (!opt->pf->af_supported(addr->sa.sa_family, opt)) 314 return NULL; 315 } 316 317 /* If we get this far, af is valid. */ 318 af = sctp_get_af_specific(addr->sa.sa_family); 319 320 if (len < af->sockaddr_len) 321 return NULL; 322 323 return af; 324 } 325 326 /* Bind a local address either to an endpoint or to an association. */ 327 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) 328 { 329 struct net *net = sock_net(sk); 330 struct sctp_sock *sp = sctp_sk(sk); 331 struct sctp_endpoint *ep = sp->ep; 332 struct sctp_bind_addr *bp = &ep->base.bind_addr; 333 struct sctp_af *af; 334 unsigned short snum; 335 int ret = 0; 336 337 /* Common sockaddr verification. */ 338 af = sctp_sockaddr_af(sp, addr, len); 339 if (!af) { 340 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", 341 __func__, sk, addr, len); 342 return -EINVAL; 343 } 344 345 snum = ntohs(addr->v4.sin_port); 346 347 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", 348 __func__, sk, &addr->sa, bp->port, snum, len); 349 350 /* PF specific bind() address verification. */ 351 if (!sp->pf->bind_verify(sp, addr)) 352 return -EADDRNOTAVAIL; 353 354 /* We must either be unbound, or bind to the same port. 355 * It's OK to allow 0 ports if we are already bound. 356 * We'll just inhert an already bound port in this case 357 */ 358 if (bp->port) { 359 if (!snum) 360 snum = bp->port; 361 else if (snum != bp->port) { 362 pr_debug("%s: new port %d doesn't match existing port " 363 "%d\n", __func__, snum, bp->port); 364 return -EINVAL; 365 } 366 } 367 368 if (snum && snum < PROT_SOCK && 369 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) 370 return -EACCES; 371 372 /* See if the address matches any of the addresses we may have 373 * already bound before checking against other endpoints. 374 */ 375 if (sctp_bind_addr_match(bp, addr, sp)) 376 return -EINVAL; 377 378 /* Make sure we are allowed to bind here. 379 * The function sctp_get_port_local() does duplicate address 380 * detection. 381 */ 382 addr->v4.sin_port = htons(snum); 383 if ((ret = sctp_get_port_local(sk, addr))) { 384 return -EADDRINUSE; 385 } 386 387 /* Refresh ephemeral port. */ 388 if (!bp->port) 389 bp->port = inet_sk(sk)->inet_num; 390 391 /* Add the address to the bind address list. 392 * Use GFP_ATOMIC since BHs will be disabled. 393 */ 394 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); 395 396 /* Copy back into socket for getsockname() use. */ 397 if (!ret) { 398 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); 399 af->to_sk_saddr(addr, sk); 400 } 401 402 return ret; 403 } 404 405 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks 406 * 407 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged 408 * at any one time. If a sender, after sending an ASCONF chunk, decides 409 * it needs to transfer another ASCONF Chunk, it MUST wait until the 410 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a 411 * subsequent ASCONF. Note this restriction binds each side, so at any 412 * time two ASCONF may be in-transit on any given association (one sent 413 * from each endpoint). 414 */ 415 static int sctp_send_asconf(struct sctp_association *asoc, 416 struct sctp_chunk *chunk) 417 { 418 struct net *net = sock_net(asoc->base.sk); 419 int retval = 0; 420 421 /* If there is an outstanding ASCONF chunk, queue it for later 422 * transmission. 423 */ 424 if (asoc->addip_last_asconf) { 425 list_add_tail(&chunk->list, &asoc->addip_chunk_list); 426 goto out; 427 } 428 429 /* Hold the chunk until an ASCONF_ACK is received. */ 430 sctp_chunk_hold(chunk); 431 retval = sctp_primitive_ASCONF(net, asoc, chunk); 432 if (retval) 433 sctp_chunk_free(chunk); 434 else 435 asoc->addip_last_asconf = chunk; 436 437 out: 438 return retval; 439 } 440 441 /* Add a list of addresses as bind addresses to local endpoint or 442 * association. 443 * 444 * Basically run through each address specified in the addrs/addrcnt 445 * array/length pair, determine if it is IPv6 or IPv4 and call 446 * sctp_do_bind() on it. 447 * 448 * If any of them fails, then the operation will be reversed and the 449 * ones that were added will be removed. 450 * 451 * Only sctp_setsockopt_bindx() is supposed to call this function. 452 */ 453 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) 454 { 455 int cnt; 456 int retval = 0; 457 void *addr_buf; 458 struct sockaddr *sa_addr; 459 struct sctp_af *af; 460 461 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, 462 addrs, addrcnt); 463 464 addr_buf = addrs; 465 for (cnt = 0; cnt < addrcnt; cnt++) { 466 /* The list may contain either IPv4 or IPv6 address; 467 * determine the address length for walking thru the list. 468 */ 469 sa_addr = addr_buf; 470 af = sctp_get_af_specific(sa_addr->sa_family); 471 if (!af) { 472 retval = -EINVAL; 473 goto err_bindx_add; 474 } 475 476 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, 477 af->sockaddr_len); 478 479 addr_buf += af->sockaddr_len; 480 481 err_bindx_add: 482 if (retval < 0) { 483 /* Failed. Cleanup the ones that have been added */ 484 if (cnt > 0) 485 sctp_bindx_rem(sk, addrs, cnt); 486 return retval; 487 } 488 } 489 490 return retval; 491 } 492 493 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the 494 * associations that are part of the endpoint indicating that a list of local 495 * addresses are added to the endpoint. 496 * 497 * If any of the addresses is already in the bind address list of the 498 * association, we do not send the chunk for that association. But it will not 499 * affect other associations. 500 * 501 * Only sctp_setsockopt_bindx() is supposed to call this function. 502 */ 503 static int sctp_send_asconf_add_ip(struct sock *sk, 504 struct sockaddr *addrs, 505 int addrcnt) 506 { 507 struct net *net = sock_net(sk); 508 struct sctp_sock *sp; 509 struct sctp_endpoint *ep; 510 struct sctp_association *asoc; 511 struct sctp_bind_addr *bp; 512 struct sctp_chunk *chunk; 513 struct sctp_sockaddr_entry *laddr; 514 union sctp_addr *addr; 515 union sctp_addr saveaddr; 516 void *addr_buf; 517 struct sctp_af *af; 518 struct list_head *p; 519 int i; 520 int retval = 0; 521 522 if (!net->sctp.addip_enable) 523 return retval; 524 525 sp = sctp_sk(sk); 526 ep = sp->ep; 527 528 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 529 __func__, sk, addrs, addrcnt); 530 531 list_for_each_entry(asoc, &ep->asocs, asocs) { 532 if (!asoc->peer.asconf_capable) 533 continue; 534 535 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) 536 continue; 537 538 if (!sctp_state(asoc, ESTABLISHED)) 539 continue; 540 541 /* Check if any address in the packed array of addresses is 542 * in the bind address list of the association. If so, 543 * do not send the asconf chunk to its peer, but continue with 544 * other associations. 545 */ 546 addr_buf = addrs; 547 for (i = 0; i < addrcnt; i++) { 548 addr = addr_buf; 549 af = sctp_get_af_specific(addr->v4.sin_family); 550 if (!af) { 551 retval = -EINVAL; 552 goto out; 553 } 554 555 if (sctp_assoc_lookup_laddr(asoc, addr)) 556 break; 557 558 addr_buf += af->sockaddr_len; 559 } 560 if (i < addrcnt) 561 continue; 562 563 /* Use the first valid address in bind addr list of 564 * association as Address Parameter of ASCONF CHUNK. 565 */ 566 bp = &asoc->base.bind_addr; 567 p = bp->address_list.next; 568 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 569 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 570 addrcnt, SCTP_PARAM_ADD_IP); 571 if (!chunk) { 572 retval = -ENOMEM; 573 goto out; 574 } 575 576 /* Add the new addresses to the bind address list with 577 * use_as_src set to 0. 578 */ 579 addr_buf = addrs; 580 for (i = 0; i < addrcnt; i++) { 581 addr = addr_buf; 582 af = sctp_get_af_specific(addr->v4.sin_family); 583 memcpy(&saveaddr, addr, af->sockaddr_len); 584 retval = sctp_add_bind_addr(bp, &saveaddr, 585 SCTP_ADDR_NEW, GFP_ATOMIC); 586 addr_buf += af->sockaddr_len; 587 } 588 if (asoc->src_out_of_asoc_ok) { 589 struct sctp_transport *trans; 590 591 list_for_each_entry(trans, 592 &asoc->peer.transport_addr_list, transports) { 593 /* Clear the source and route cache */ 594 dst_release(trans->dst); 595 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 596 2*asoc->pathmtu, 4380)); 597 trans->ssthresh = asoc->peer.i.a_rwnd; 598 trans->rto = asoc->rto_initial; 599 sctp_max_rto(asoc, trans); 600 trans->rtt = trans->srtt = trans->rttvar = 0; 601 sctp_transport_route(trans, NULL, 602 sctp_sk(asoc->base.sk)); 603 } 604 } 605 retval = sctp_send_asconf(asoc, chunk); 606 } 607 608 out: 609 return retval; 610 } 611 612 /* Remove a list of addresses from bind addresses list. Do not remove the 613 * last address. 614 * 615 * Basically run through each address specified in the addrs/addrcnt 616 * array/length pair, determine if it is IPv6 or IPv4 and call 617 * sctp_del_bind() on it. 618 * 619 * If any of them fails, then the operation will be reversed and the 620 * ones that were removed will be added back. 621 * 622 * At least one address has to be left; if only one address is 623 * available, the operation will return -EBUSY. 624 * 625 * Only sctp_setsockopt_bindx() is supposed to call this function. 626 */ 627 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) 628 { 629 struct sctp_sock *sp = sctp_sk(sk); 630 struct sctp_endpoint *ep = sp->ep; 631 int cnt; 632 struct sctp_bind_addr *bp = &ep->base.bind_addr; 633 int retval = 0; 634 void *addr_buf; 635 union sctp_addr *sa_addr; 636 struct sctp_af *af; 637 638 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 639 __func__, sk, addrs, addrcnt); 640 641 addr_buf = addrs; 642 for (cnt = 0; cnt < addrcnt; cnt++) { 643 /* If the bind address list is empty or if there is only one 644 * bind address, there is nothing more to be removed (we need 645 * at least one address here). 646 */ 647 if (list_empty(&bp->address_list) || 648 (sctp_list_single_entry(&bp->address_list))) { 649 retval = -EBUSY; 650 goto err_bindx_rem; 651 } 652 653 sa_addr = addr_buf; 654 af = sctp_get_af_specific(sa_addr->sa.sa_family); 655 if (!af) { 656 retval = -EINVAL; 657 goto err_bindx_rem; 658 } 659 660 if (!af->addr_valid(sa_addr, sp, NULL)) { 661 retval = -EADDRNOTAVAIL; 662 goto err_bindx_rem; 663 } 664 665 if (sa_addr->v4.sin_port && 666 sa_addr->v4.sin_port != htons(bp->port)) { 667 retval = -EINVAL; 668 goto err_bindx_rem; 669 } 670 671 if (!sa_addr->v4.sin_port) 672 sa_addr->v4.sin_port = htons(bp->port); 673 674 /* FIXME - There is probably a need to check if sk->sk_saddr and 675 * sk->sk_rcv_addr are currently set to one of the addresses to 676 * be removed. This is something which needs to be looked into 677 * when we are fixing the outstanding issues with multi-homing 678 * socket routing and failover schemes. Refer to comments in 679 * sctp_do_bind(). -daisy 680 */ 681 retval = sctp_del_bind_addr(bp, sa_addr); 682 683 addr_buf += af->sockaddr_len; 684 err_bindx_rem: 685 if (retval < 0) { 686 /* Failed. Add the ones that has been removed back */ 687 if (cnt > 0) 688 sctp_bindx_add(sk, addrs, cnt); 689 return retval; 690 } 691 } 692 693 return retval; 694 } 695 696 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of 697 * the associations that are part of the endpoint indicating that a list of 698 * local addresses are removed from the endpoint. 699 * 700 * If any of the addresses is already in the bind address list of the 701 * association, we do not send the chunk for that association. But it will not 702 * affect other associations. 703 * 704 * Only sctp_setsockopt_bindx() is supposed to call this function. 705 */ 706 static int sctp_send_asconf_del_ip(struct sock *sk, 707 struct sockaddr *addrs, 708 int addrcnt) 709 { 710 struct net *net = sock_net(sk); 711 struct sctp_sock *sp; 712 struct sctp_endpoint *ep; 713 struct sctp_association *asoc; 714 struct sctp_transport *transport; 715 struct sctp_bind_addr *bp; 716 struct sctp_chunk *chunk; 717 union sctp_addr *laddr; 718 void *addr_buf; 719 struct sctp_af *af; 720 struct sctp_sockaddr_entry *saddr; 721 int i; 722 int retval = 0; 723 int stored = 0; 724 725 chunk = NULL; 726 if (!net->sctp.addip_enable) 727 return retval; 728 729 sp = sctp_sk(sk); 730 ep = sp->ep; 731 732 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 733 __func__, sk, addrs, addrcnt); 734 735 list_for_each_entry(asoc, &ep->asocs, asocs) { 736 737 if (!asoc->peer.asconf_capable) 738 continue; 739 740 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) 741 continue; 742 743 if (!sctp_state(asoc, ESTABLISHED)) 744 continue; 745 746 /* Check if any address in the packed array of addresses is 747 * not present in the bind address list of the association. 748 * If so, do not send the asconf chunk to its peer, but 749 * continue with other associations. 750 */ 751 addr_buf = addrs; 752 for (i = 0; i < addrcnt; i++) { 753 laddr = addr_buf; 754 af = sctp_get_af_specific(laddr->v4.sin_family); 755 if (!af) { 756 retval = -EINVAL; 757 goto out; 758 } 759 760 if (!sctp_assoc_lookup_laddr(asoc, laddr)) 761 break; 762 763 addr_buf += af->sockaddr_len; 764 } 765 if (i < addrcnt) 766 continue; 767 768 /* Find one address in the association's bind address list 769 * that is not in the packed array of addresses. This is to 770 * make sure that we do not delete all the addresses in the 771 * association. 772 */ 773 bp = &asoc->base.bind_addr; 774 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 775 addrcnt, sp); 776 if ((laddr == NULL) && (addrcnt == 1)) { 777 if (asoc->asconf_addr_del_pending) 778 continue; 779 asoc->asconf_addr_del_pending = 780 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); 781 if (asoc->asconf_addr_del_pending == NULL) { 782 retval = -ENOMEM; 783 goto out; 784 } 785 asoc->asconf_addr_del_pending->sa.sa_family = 786 addrs->sa_family; 787 asoc->asconf_addr_del_pending->v4.sin_port = 788 htons(bp->port); 789 if (addrs->sa_family == AF_INET) { 790 struct sockaddr_in *sin; 791 792 sin = (struct sockaddr_in *)addrs; 793 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; 794 } else if (addrs->sa_family == AF_INET6) { 795 struct sockaddr_in6 *sin6; 796 797 sin6 = (struct sockaddr_in6 *)addrs; 798 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; 799 } 800 801 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", 802 __func__, asoc, &asoc->asconf_addr_del_pending->sa, 803 asoc->asconf_addr_del_pending); 804 805 asoc->src_out_of_asoc_ok = 1; 806 stored = 1; 807 goto skip_mkasconf; 808 } 809 810 if (laddr == NULL) 811 return -EINVAL; 812 813 /* We do not need RCU protection throughout this loop 814 * because this is done under a socket lock from the 815 * setsockopt call. 816 */ 817 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, 818 SCTP_PARAM_DEL_IP); 819 if (!chunk) { 820 retval = -ENOMEM; 821 goto out; 822 } 823 824 skip_mkasconf: 825 /* Reset use_as_src flag for the addresses in the bind address 826 * list that are to be deleted. 827 */ 828 addr_buf = addrs; 829 for (i = 0; i < addrcnt; i++) { 830 laddr = addr_buf; 831 af = sctp_get_af_specific(laddr->v4.sin_family); 832 list_for_each_entry(saddr, &bp->address_list, list) { 833 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 834 saddr->state = SCTP_ADDR_DEL; 835 } 836 addr_buf += af->sockaddr_len; 837 } 838 839 /* Update the route and saddr entries for all the transports 840 * as some of the addresses in the bind address list are 841 * about to be deleted and cannot be used as source addresses. 842 */ 843 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 844 transports) { 845 dst_release(transport->dst); 846 sctp_transport_route(transport, NULL, 847 sctp_sk(asoc->base.sk)); 848 } 849 850 if (stored) 851 /* We don't need to transmit ASCONF */ 852 continue; 853 retval = sctp_send_asconf(asoc, chunk); 854 } 855 out: 856 return retval; 857 } 858 859 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ 860 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) 861 { 862 struct sock *sk = sctp_opt2sk(sp); 863 union sctp_addr *addr; 864 struct sctp_af *af; 865 866 /* It is safe to write port space in caller. */ 867 addr = &addrw->a; 868 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); 869 af = sctp_get_af_specific(addr->sa.sa_family); 870 if (!af) 871 return -EINVAL; 872 if (sctp_verify_addr(sk, addr, af->sockaddr_len)) 873 return -EINVAL; 874 875 if (addrw->state == SCTP_ADDR_NEW) 876 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); 877 else 878 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); 879 } 880 881 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() 882 * 883 * API 8.1 884 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, 885 * int flags); 886 * 887 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 888 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 889 * or IPv6 addresses. 890 * 891 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 892 * Section 3.1.2 for this usage. 893 * 894 * addrs is a pointer to an array of one or more socket addresses. Each 895 * address is contained in its appropriate structure (i.e. struct 896 * sockaddr_in or struct sockaddr_in6) the family of the address type 897 * must be used to distinguish the address length (note that this 898 * representation is termed a "packed array" of addresses). The caller 899 * specifies the number of addresses in the array with addrcnt. 900 * 901 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns 902 * -1, and sets errno to the appropriate error code. 903 * 904 * For SCTP, the port given in each socket address must be the same, or 905 * sctp_bindx() will fail, setting errno to EINVAL. 906 * 907 * The flags parameter is formed from the bitwise OR of zero or more of 908 * the following currently defined flags: 909 * 910 * SCTP_BINDX_ADD_ADDR 911 * 912 * SCTP_BINDX_REM_ADDR 913 * 914 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the 915 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given 916 * addresses from the association. The two flags are mutually exclusive; 917 * if both are given, sctp_bindx() will fail with EINVAL. A caller may 918 * not remove all addresses from an association; sctp_bindx() will 919 * reject such an attempt with EINVAL. 920 * 921 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate 922 * additional addresses with an endpoint after calling bind(). Or use 923 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening 924 * socket is associated with so that no new association accepted will be 925 * associated with those addresses. If the endpoint supports dynamic 926 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a 927 * endpoint to send the appropriate message to the peer to change the 928 * peers address lists. 929 * 930 * Adding and removing addresses from a connected association is 931 * optional functionality. Implementations that do not support this 932 * functionality should return EOPNOTSUPP. 933 * 934 * Basically do nothing but copying the addresses from user to kernel 935 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. 936 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() 937 * from userspace. 938 * 939 * We don't use copy_from_user() for optimization: we first do the 940 * sanity checks (buffer size -fast- and access check-healthy 941 * pointer); if all of those succeed, then we can alloc the memory 942 * (expensive operation) needed to copy the data to kernel. Then we do 943 * the copying without checking the user space area 944 * (__copy_from_user()). 945 * 946 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 947 * it. 948 * 949 * sk The sk of the socket 950 * addrs The pointer to the addresses in user land 951 * addrssize Size of the addrs buffer 952 * op Operation to perform (add or remove, see the flags of 953 * sctp_bindx) 954 * 955 * Returns 0 if ok, <0 errno code on error. 956 */ 957 static int sctp_setsockopt_bindx(struct sock *sk, 958 struct sockaddr __user *addrs, 959 int addrs_size, int op) 960 { 961 struct sockaddr *kaddrs; 962 int err; 963 int addrcnt = 0; 964 int walk_size = 0; 965 struct sockaddr *sa_addr; 966 void *addr_buf; 967 struct sctp_af *af; 968 969 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", 970 __func__, sk, addrs, addrs_size, op); 971 972 if (unlikely(addrs_size <= 0)) 973 return -EINVAL; 974 975 /* Check the user passed a healthy pointer. */ 976 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 977 return -EFAULT; 978 979 /* Alloc space for the address array in kernel memory. */ 980 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 981 if (unlikely(!kaddrs)) 982 return -ENOMEM; 983 984 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 985 kfree(kaddrs); 986 return -EFAULT; 987 } 988 989 /* Walk through the addrs buffer and count the number of addresses. */ 990 addr_buf = kaddrs; 991 while (walk_size < addrs_size) { 992 if (walk_size + sizeof(sa_family_t) > addrs_size) { 993 kfree(kaddrs); 994 return -EINVAL; 995 } 996 997 sa_addr = addr_buf; 998 af = sctp_get_af_specific(sa_addr->sa_family); 999 1000 /* If the address family is not supported or if this address 1001 * causes the address buffer to overflow return EINVAL. 1002 */ 1003 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1004 kfree(kaddrs); 1005 return -EINVAL; 1006 } 1007 addrcnt++; 1008 addr_buf += af->sockaddr_len; 1009 walk_size += af->sockaddr_len; 1010 } 1011 1012 /* Do the work. */ 1013 switch (op) { 1014 case SCTP_BINDX_ADD_ADDR: 1015 err = sctp_bindx_add(sk, kaddrs, addrcnt); 1016 if (err) 1017 goto out; 1018 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); 1019 break; 1020 1021 case SCTP_BINDX_REM_ADDR: 1022 err = sctp_bindx_rem(sk, kaddrs, addrcnt); 1023 if (err) 1024 goto out; 1025 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); 1026 break; 1027 1028 default: 1029 err = -EINVAL; 1030 break; 1031 } 1032 1033 out: 1034 kfree(kaddrs); 1035 1036 return err; 1037 } 1038 1039 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) 1040 * 1041 * Common routine for handling connect() and sctp_connectx(). 1042 * Connect will come in with just a single address. 1043 */ 1044 static int __sctp_connect(struct sock *sk, 1045 struct sockaddr *kaddrs, 1046 int addrs_size, 1047 sctp_assoc_t *assoc_id) 1048 { 1049 struct net *net = sock_net(sk); 1050 struct sctp_sock *sp; 1051 struct sctp_endpoint *ep; 1052 struct sctp_association *asoc = NULL; 1053 struct sctp_association *asoc2; 1054 struct sctp_transport *transport; 1055 union sctp_addr to; 1056 struct sctp_af *af; 1057 sctp_scope_t scope; 1058 long timeo; 1059 int err = 0; 1060 int addrcnt = 0; 1061 int walk_size = 0; 1062 union sctp_addr *sa_addr = NULL; 1063 void *addr_buf; 1064 unsigned short port; 1065 unsigned int f_flags = 0; 1066 1067 sp = sctp_sk(sk); 1068 ep = sp->ep; 1069 1070 /* connect() cannot be done on a socket that is already in ESTABLISHED 1071 * state - UDP-style peeled off socket or a TCP-style socket that 1072 * is already connected. 1073 * It cannot be done even on a TCP-style listening socket. 1074 */ 1075 if (sctp_sstate(sk, ESTABLISHED) || 1076 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { 1077 err = -EISCONN; 1078 goto out_free; 1079 } 1080 1081 /* Walk through the addrs buffer and count the number of addresses. */ 1082 addr_buf = kaddrs; 1083 while (walk_size < addrs_size) { 1084 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1085 err = -EINVAL; 1086 goto out_free; 1087 } 1088 1089 sa_addr = addr_buf; 1090 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1091 1092 /* If the address family is not supported or if this address 1093 * causes the address buffer to overflow return EINVAL. 1094 */ 1095 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1096 err = -EINVAL; 1097 goto out_free; 1098 } 1099 1100 port = ntohs(sa_addr->v4.sin_port); 1101 1102 /* Save current address so we can work with it */ 1103 memcpy(&to, sa_addr, af->sockaddr_len); 1104 1105 err = sctp_verify_addr(sk, &to, af->sockaddr_len); 1106 if (err) 1107 goto out_free; 1108 1109 /* Make sure the destination port is correctly set 1110 * in all addresses. 1111 */ 1112 if (asoc && asoc->peer.port && asoc->peer.port != port) { 1113 err = -EINVAL; 1114 goto out_free; 1115 } 1116 1117 /* Check if there already is a matching association on the 1118 * endpoint (other than the one created here). 1119 */ 1120 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1121 if (asoc2 && asoc2 != asoc) { 1122 if (asoc2->state >= SCTP_STATE_ESTABLISHED) 1123 err = -EISCONN; 1124 else 1125 err = -EALREADY; 1126 goto out_free; 1127 } 1128 1129 /* If we could not find a matching association on the endpoint, 1130 * make sure that there is no peeled-off association matching 1131 * the peer address even on another socket. 1132 */ 1133 if (sctp_endpoint_is_peeled_off(ep, &to)) { 1134 err = -EADDRNOTAVAIL; 1135 goto out_free; 1136 } 1137 1138 if (!asoc) { 1139 /* If a bind() or sctp_bindx() is not called prior to 1140 * an sctp_connectx() call, the system picks an 1141 * ephemeral port and will choose an address set 1142 * equivalent to binding with a wildcard address. 1143 */ 1144 if (!ep->base.bind_addr.port) { 1145 if (sctp_autobind(sk)) { 1146 err = -EAGAIN; 1147 goto out_free; 1148 } 1149 } else { 1150 /* 1151 * If an unprivileged user inherits a 1-many 1152 * style socket with open associations on a 1153 * privileged port, it MAY be permitted to 1154 * accept new associations, but it SHOULD NOT 1155 * be permitted to open new associations. 1156 */ 1157 if (ep->base.bind_addr.port < PROT_SOCK && 1158 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1159 err = -EACCES; 1160 goto out_free; 1161 } 1162 } 1163 1164 scope = sctp_scope(&to); 1165 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1166 if (!asoc) { 1167 err = -ENOMEM; 1168 goto out_free; 1169 } 1170 1171 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, 1172 GFP_KERNEL); 1173 if (err < 0) { 1174 goto out_free; 1175 } 1176 1177 } 1178 1179 /* Prime the peer's transport structures. */ 1180 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, 1181 SCTP_UNKNOWN); 1182 if (!transport) { 1183 err = -ENOMEM; 1184 goto out_free; 1185 } 1186 1187 addrcnt++; 1188 addr_buf += af->sockaddr_len; 1189 walk_size += af->sockaddr_len; 1190 } 1191 1192 /* In case the user of sctp_connectx() wants an association 1193 * id back, assign one now. 1194 */ 1195 if (assoc_id) { 1196 err = sctp_assoc_set_id(asoc, GFP_KERNEL); 1197 if (err < 0) 1198 goto out_free; 1199 } 1200 1201 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1202 if (err < 0) { 1203 goto out_free; 1204 } 1205 1206 /* Initialize sk's dport and daddr for getpeername() */ 1207 inet_sk(sk)->inet_dport = htons(asoc->peer.port); 1208 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1209 af->to_sk_daddr(sa_addr, sk); 1210 sk->sk_err = 0; 1211 1212 /* in-kernel sockets don't generally have a file allocated to them 1213 * if all they do is call sock_create_kern(). 1214 */ 1215 if (sk->sk_socket->file) 1216 f_flags = sk->sk_socket->file->f_flags; 1217 1218 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1219 1220 err = sctp_wait_for_connect(asoc, &timeo); 1221 if ((err == 0 || err == -EINPROGRESS) && assoc_id) 1222 *assoc_id = asoc->assoc_id; 1223 1224 /* Don't free association on exit. */ 1225 asoc = NULL; 1226 1227 out_free: 1228 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", 1229 __func__, asoc, kaddrs, err); 1230 1231 if (asoc) { 1232 /* sctp_primitive_ASSOCIATE may have added this association 1233 * To the hash table, try to unhash it, just in case, its a noop 1234 * if it wasn't hashed so we're safe 1235 */ 1236 sctp_unhash_established(asoc); 1237 sctp_association_free(asoc); 1238 } 1239 return err; 1240 } 1241 1242 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1243 * 1244 * API 8.9 1245 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, 1246 * sctp_assoc_t *asoc); 1247 * 1248 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1249 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1250 * or IPv6 addresses. 1251 * 1252 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see 1253 * Section 3.1.2 for this usage. 1254 * 1255 * addrs is a pointer to an array of one or more socket addresses. Each 1256 * address is contained in its appropriate structure (i.e. struct 1257 * sockaddr_in or struct sockaddr_in6) the family of the address type 1258 * must be used to distengish the address length (note that this 1259 * representation is termed a "packed array" of addresses). The caller 1260 * specifies the number of addresses in the array with addrcnt. 1261 * 1262 * On success, sctp_connectx() returns 0. It also sets the assoc_id to 1263 * the association id of the new association. On failure, sctp_connectx() 1264 * returns -1, and sets errno to the appropriate error code. The assoc_id 1265 * is not touched by the kernel. 1266 * 1267 * For SCTP, the port given in each socket address must be the same, or 1268 * sctp_connectx() will fail, setting errno to EINVAL. 1269 * 1270 * An application can use sctp_connectx to initiate an association with 1271 * an endpoint that is multi-homed. Much like sctp_bindx() this call 1272 * allows a caller to specify multiple addresses at which a peer can be 1273 * reached. The way the SCTP stack uses the list of addresses to set up 1274 * the association is implementation dependent. This function only 1275 * specifies that the stack will try to make use of all the addresses in 1276 * the list when needed. 1277 * 1278 * Note that the list of addresses passed in is only used for setting up 1279 * the association. It does not necessarily equal the set of addresses 1280 * the peer uses for the resulting association. If the caller wants to 1281 * find out the set of peer addresses, it must use sctp_getpaddrs() to 1282 * retrieve them after the association has been set up. 1283 * 1284 * Basically do nothing but copying the addresses from user to kernel 1285 * land and invoking either sctp_connectx(). This is used for tunneling 1286 * the sctp_connectx() request through sctp_setsockopt() from userspace. 1287 * 1288 * We don't use copy_from_user() for optimization: we first do the 1289 * sanity checks (buffer size -fast- and access check-healthy 1290 * pointer); if all of those succeed, then we can alloc the memory 1291 * (expensive operation) needed to copy the data to kernel. Then we do 1292 * the copying without checking the user space area 1293 * (__copy_from_user()). 1294 * 1295 * On exit there is no need to do sockfd_put(), sys_setsockopt() does 1296 * it. 1297 * 1298 * sk The sk of the socket 1299 * addrs The pointer to the addresses in user land 1300 * addrssize Size of the addrs buffer 1301 * 1302 * Returns >=0 if ok, <0 errno code on error. 1303 */ 1304 static int __sctp_setsockopt_connectx(struct sock *sk, 1305 struct sockaddr __user *addrs, 1306 int addrs_size, 1307 sctp_assoc_t *assoc_id) 1308 { 1309 int err = 0; 1310 struct sockaddr *kaddrs; 1311 1312 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1313 __func__, sk, addrs, addrs_size); 1314 1315 if (unlikely(addrs_size <= 0)) 1316 return -EINVAL; 1317 1318 /* Check the user passed a healthy pointer. */ 1319 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) 1320 return -EFAULT; 1321 1322 /* Alloc space for the address array in kernel memory. */ 1323 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 1324 if (unlikely(!kaddrs)) 1325 return -ENOMEM; 1326 1327 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 1328 err = -EFAULT; 1329 } else { 1330 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); 1331 } 1332 1333 kfree(kaddrs); 1334 1335 return err; 1336 } 1337 1338 /* 1339 * This is an older interface. It's kept for backward compatibility 1340 * to the option that doesn't provide association id. 1341 */ 1342 static int sctp_setsockopt_connectx_old(struct sock *sk, 1343 struct sockaddr __user *addrs, 1344 int addrs_size) 1345 { 1346 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); 1347 } 1348 1349 /* 1350 * New interface for the API. The since the API is done with a socket 1351 * option, to make it simple we feed back the association id is as a return 1352 * indication to the call. Error is always negative and association id is 1353 * always positive. 1354 */ 1355 static int sctp_setsockopt_connectx(struct sock *sk, 1356 struct sockaddr __user *addrs, 1357 int addrs_size) 1358 { 1359 sctp_assoc_t assoc_id = 0; 1360 int err = 0; 1361 1362 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); 1363 1364 if (err) 1365 return err; 1366 else 1367 return assoc_id; 1368 } 1369 1370 /* 1371 * New (hopefully final) interface for the API. 1372 * We use the sctp_getaddrs_old structure so that use-space library 1373 * can avoid any unnecessary allocations. The only different part 1374 * is that we store the actual length of the address buffer into the 1375 * addrs_num structure member. That way we can re-use the existing 1376 * code. 1377 */ 1378 #ifdef CONFIG_COMPAT 1379 struct compat_sctp_getaddrs_old { 1380 sctp_assoc_t assoc_id; 1381 s32 addr_num; 1382 compat_uptr_t addrs; /* struct sockaddr * */ 1383 }; 1384 #endif 1385 1386 static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1387 char __user *optval, 1388 int __user *optlen) 1389 { 1390 struct sctp_getaddrs_old param; 1391 sctp_assoc_t assoc_id = 0; 1392 int err = 0; 1393 1394 #ifdef CONFIG_COMPAT 1395 if (is_compat_task()) { 1396 struct compat_sctp_getaddrs_old param32; 1397 1398 if (len < sizeof(param32)) 1399 return -EINVAL; 1400 if (copy_from_user(¶m32, optval, sizeof(param32))) 1401 return -EFAULT; 1402 1403 param.assoc_id = param32.assoc_id; 1404 param.addr_num = param32.addr_num; 1405 param.addrs = compat_ptr(param32.addrs); 1406 } else 1407 #endif 1408 { 1409 if (len < sizeof(param)) 1410 return -EINVAL; 1411 if (copy_from_user(¶m, optval, sizeof(param))) 1412 return -EFAULT; 1413 } 1414 1415 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) 1416 param.addrs, param.addr_num, 1417 &assoc_id); 1418 if (err == 0 || err == -EINPROGRESS) { 1419 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1420 return -EFAULT; 1421 if (put_user(sizeof(assoc_id), optlen)) 1422 return -EFAULT; 1423 } 1424 1425 return err; 1426 } 1427 1428 /* API 3.1.4 close() - UDP Style Syntax 1429 * Applications use close() to perform graceful shutdown (as described in 1430 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1431 * by a UDP-style socket. 1432 * 1433 * The syntax is 1434 * 1435 * ret = close(int sd); 1436 * 1437 * sd - the socket descriptor of the associations to be closed. 1438 * 1439 * To gracefully shutdown a specific association represented by the 1440 * UDP-style socket, an application should use the sendmsg() call, 1441 * passing no user data, but including the appropriate flag in the 1442 * ancillary data (see Section xxxx). 1443 * 1444 * If sd in the close() call is a branched-off socket representing only 1445 * one association, the shutdown is performed on that association only. 1446 * 1447 * 4.1.6 close() - TCP Style Syntax 1448 * 1449 * Applications use close() to gracefully close down an association. 1450 * 1451 * The syntax is: 1452 * 1453 * int close(int sd); 1454 * 1455 * sd - the socket descriptor of the association to be closed. 1456 * 1457 * After an application calls close() on a socket descriptor, no further 1458 * socket operations will succeed on that descriptor. 1459 * 1460 * API 7.1.4 SO_LINGER 1461 * 1462 * An application using the TCP-style socket can use this option to 1463 * perform the SCTP ABORT primitive. The linger option structure is: 1464 * 1465 * struct linger { 1466 * int l_onoff; // option on/off 1467 * int l_linger; // linger time 1468 * }; 1469 * 1470 * To enable the option, set l_onoff to 1. If the l_linger value is set 1471 * to 0, calling close() is the same as the ABORT primitive. If the 1472 * value is set to a negative value, the setsockopt() call will return 1473 * an error. If the value is set to a positive value linger_time, the 1474 * close() can be blocked for at most linger_time ms. If the graceful 1475 * shutdown phase does not finish during this period, close() will 1476 * return but the graceful shutdown phase continues in the system. 1477 */ 1478 static void sctp_close(struct sock *sk, long timeout) 1479 { 1480 struct net *net = sock_net(sk); 1481 struct sctp_endpoint *ep; 1482 struct sctp_association *asoc; 1483 struct list_head *pos, *temp; 1484 unsigned int data_was_unread; 1485 1486 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); 1487 1488 lock_sock(sk); 1489 sk->sk_shutdown = SHUTDOWN_MASK; 1490 sk->sk_state = SCTP_SS_CLOSING; 1491 1492 ep = sctp_sk(sk)->ep; 1493 1494 /* Clean up any skbs sitting on the receive queue. */ 1495 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); 1496 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); 1497 1498 /* Walk all associations on an endpoint. */ 1499 list_for_each_safe(pos, temp, &ep->asocs) { 1500 asoc = list_entry(pos, struct sctp_association, asocs); 1501 1502 if (sctp_style(sk, TCP)) { 1503 /* A closed association can still be in the list if 1504 * it belongs to a TCP-style listening socket that is 1505 * not yet accepted. If so, free it. If not, send an 1506 * ABORT or SHUTDOWN based on the linger options. 1507 */ 1508 if (sctp_state(asoc, CLOSED)) { 1509 sctp_unhash_established(asoc); 1510 sctp_association_free(asoc); 1511 continue; 1512 } 1513 } 1514 1515 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || 1516 !skb_queue_empty(&asoc->ulpq.reasm) || 1517 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { 1518 struct sctp_chunk *chunk; 1519 1520 chunk = sctp_make_abort_user(asoc, NULL, 0); 1521 if (chunk) 1522 sctp_primitive_ABORT(net, asoc, chunk); 1523 } else 1524 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1525 } 1526 1527 /* On a TCP-style socket, block for at most linger_time if set. */ 1528 if (sctp_style(sk, TCP) && timeout) 1529 sctp_wait_for_close(sk, timeout); 1530 1531 /* This will run the backlog queue. */ 1532 release_sock(sk); 1533 1534 /* Supposedly, no process has access to the socket, but 1535 * the net layers still may. 1536 */ 1537 local_bh_disable(); 1538 bh_lock_sock(sk); 1539 1540 /* Hold the sock, since sk_common_release() will put sock_put() 1541 * and we have just a little more cleanup. 1542 */ 1543 sock_hold(sk); 1544 sk_common_release(sk); 1545 1546 bh_unlock_sock(sk); 1547 local_bh_enable(); 1548 1549 sock_put(sk); 1550 1551 SCTP_DBG_OBJCNT_DEC(sock); 1552 } 1553 1554 /* Handle EPIPE error. */ 1555 static int sctp_error(struct sock *sk, int flags, int err) 1556 { 1557 if (err == -EPIPE) 1558 err = sock_error(sk) ? : -EPIPE; 1559 if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) 1560 send_sig(SIGPIPE, current, 0); 1561 return err; 1562 } 1563 1564 /* API 3.1.3 sendmsg() - UDP Style Syntax 1565 * 1566 * An application uses sendmsg() and recvmsg() calls to transmit data to 1567 * and receive data from its peer. 1568 * 1569 * ssize_t sendmsg(int socket, const struct msghdr *message, 1570 * int flags); 1571 * 1572 * socket - the socket descriptor of the endpoint. 1573 * message - pointer to the msghdr structure which contains a single 1574 * user message and possibly some ancillary data. 1575 * 1576 * See Section 5 for complete description of the data 1577 * structures. 1578 * 1579 * flags - flags sent or received with the user message, see Section 1580 * 5 for complete description of the flags. 1581 * 1582 * Note: This function could use a rewrite especially when explicit 1583 * connect support comes in. 1584 */ 1585 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ 1586 1587 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); 1588 1589 static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, 1590 struct msghdr *msg, size_t msg_len) 1591 { 1592 struct net *net = sock_net(sk); 1593 struct sctp_sock *sp; 1594 struct sctp_endpoint *ep; 1595 struct sctp_association *new_asoc = NULL, *asoc = NULL; 1596 struct sctp_transport *transport, *chunk_tp; 1597 struct sctp_chunk *chunk; 1598 union sctp_addr to; 1599 struct sockaddr *msg_name = NULL; 1600 struct sctp_sndrcvinfo default_sinfo; 1601 struct sctp_sndrcvinfo *sinfo; 1602 struct sctp_initmsg *sinit; 1603 sctp_assoc_t associd = 0; 1604 sctp_cmsgs_t cmsgs = { NULL }; 1605 int err; 1606 sctp_scope_t scope; 1607 long timeo; 1608 __u16 sinfo_flags = 0; 1609 struct sctp_datamsg *datamsg; 1610 int msg_flags = msg->msg_flags; 1611 1612 err = 0; 1613 sp = sctp_sk(sk); 1614 ep = sp->ep; 1615 1616 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, 1617 msg, msg_len, ep); 1618 1619 /* We cannot send a message over a TCP-style listening socket. */ 1620 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { 1621 err = -EPIPE; 1622 goto out_nounlock; 1623 } 1624 1625 /* Parse out the SCTP CMSGs. */ 1626 err = sctp_msghdr_parse(msg, &cmsgs); 1627 if (err) { 1628 pr_debug("%s: msghdr parse err:%x\n", __func__, err); 1629 goto out_nounlock; 1630 } 1631 1632 /* Fetch the destination address for this packet. This 1633 * address only selects the association--it is not necessarily 1634 * the address we will send to. 1635 * For a peeled-off socket, msg_name is ignored. 1636 */ 1637 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { 1638 int msg_namelen = msg->msg_namelen; 1639 1640 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, 1641 msg_namelen); 1642 if (err) 1643 return err; 1644 1645 if (msg_namelen > sizeof(to)) 1646 msg_namelen = sizeof(to); 1647 memcpy(&to, msg->msg_name, msg_namelen); 1648 msg_name = msg->msg_name; 1649 } 1650 1651 sinfo = cmsgs.info; 1652 sinit = cmsgs.init; 1653 1654 /* Did the user specify SNDRCVINFO? */ 1655 if (sinfo) { 1656 sinfo_flags = sinfo->sinfo_flags; 1657 associd = sinfo->sinfo_assoc_id; 1658 } 1659 1660 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, 1661 msg_len, sinfo_flags); 1662 1663 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ 1664 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { 1665 err = -EINVAL; 1666 goto out_nounlock; 1667 } 1668 1669 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero 1670 * length messages when SCTP_EOF|SCTP_ABORT is not set. 1671 * If SCTP_ABORT is set, the message length could be non zero with 1672 * the msg_iov set to the user abort reason. 1673 */ 1674 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || 1675 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { 1676 err = -EINVAL; 1677 goto out_nounlock; 1678 } 1679 1680 /* If SCTP_ADDR_OVER is set, there must be an address 1681 * specified in msg_name. 1682 */ 1683 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { 1684 err = -EINVAL; 1685 goto out_nounlock; 1686 } 1687 1688 transport = NULL; 1689 1690 pr_debug("%s: about to look up association\n", __func__); 1691 1692 lock_sock(sk); 1693 1694 /* If a msg_name has been specified, assume this is to be used. */ 1695 if (msg_name) { 1696 /* Look for a matching association on the endpoint. */ 1697 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1698 if (!asoc) { 1699 /* If we could not find a matching association on the 1700 * endpoint, make sure that it is not a TCP-style 1701 * socket that already has an association or there is 1702 * no peeled-off association on another socket. 1703 */ 1704 if ((sctp_style(sk, TCP) && 1705 sctp_sstate(sk, ESTABLISHED)) || 1706 sctp_endpoint_is_peeled_off(ep, &to)) { 1707 err = -EADDRNOTAVAIL; 1708 goto out_unlock; 1709 } 1710 } 1711 } else { 1712 asoc = sctp_id2assoc(sk, associd); 1713 if (!asoc) { 1714 err = -EPIPE; 1715 goto out_unlock; 1716 } 1717 } 1718 1719 if (asoc) { 1720 pr_debug("%s: just looked up association:%p\n", __func__, asoc); 1721 1722 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED 1723 * socket that has an association in CLOSED state. This can 1724 * happen when an accepted socket has an association that is 1725 * already CLOSED. 1726 */ 1727 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { 1728 err = -EPIPE; 1729 goto out_unlock; 1730 } 1731 1732 if (sinfo_flags & SCTP_EOF) { 1733 pr_debug("%s: shutting down association:%p\n", 1734 __func__, asoc); 1735 1736 sctp_primitive_SHUTDOWN(net, asoc, NULL); 1737 err = 0; 1738 goto out_unlock; 1739 } 1740 if (sinfo_flags & SCTP_ABORT) { 1741 1742 chunk = sctp_make_abort_user(asoc, msg, msg_len); 1743 if (!chunk) { 1744 err = -ENOMEM; 1745 goto out_unlock; 1746 } 1747 1748 pr_debug("%s: aborting association:%p\n", 1749 __func__, asoc); 1750 1751 sctp_primitive_ABORT(net, asoc, chunk); 1752 err = 0; 1753 goto out_unlock; 1754 } 1755 } 1756 1757 /* Do we need to create the association? */ 1758 if (!asoc) { 1759 pr_debug("%s: there is no association yet\n", __func__); 1760 1761 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { 1762 err = -EINVAL; 1763 goto out_unlock; 1764 } 1765 1766 /* Check for invalid stream against the stream counts, 1767 * either the default or the user specified stream counts. 1768 */ 1769 if (sinfo) { 1770 if (!sinit || !sinit->sinit_num_ostreams) { 1771 /* Check against the defaults. */ 1772 if (sinfo->sinfo_stream >= 1773 sp->initmsg.sinit_num_ostreams) { 1774 err = -EINVAL; 1775 goto out_unlock; 1776 } 1777 } else { 1778 /* Check against the requested. */ 1779 if (sinfo->sinfo_stream >= 1780 sinit->sinit_num_ostreams) { 1781 err = -EINVAL; 1782 goto out_unlock; 1783 } 1784 } 1785 } 1786 1787 /* 1788 * API 3.1.2 bind() - UDP Style Syntax 1789 * If a bind() or sctp_bindx() is not called prior to a 1790 * sendmsg() call that initiates a new association, the 1791 * system picks an ephemeral port and will choose an address 1792 * set equivalent to binding with a wildcard address. 1793 */ 1794 if (!ep->base.bind_addr.port) { 1795 if (sctp_autobind(sk)) { 1796 err = -EAGAIN; 1797 goto out_unlock; 1798 } 1799 } else { 1800 /* 1801 * If an unprivileged user inherits a one-to-many 1802 * style socket with open associations on a privileged 1803 * port, it MAY be permitted to accept new associations, 1804 * but it SHOULD NOT be permitted to open new 1805 * associations. 1806 */ 1807 if (ep->base.bind_addr.port < PROT_SOCK && 1808 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1809 err = -EACCES; 1810 goto out_unlock; 1811 } 1812 } 1813 1814 scope = sctp_scope(&to); 1815 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1816 if (!new_asoc) { 1817 err = -ENOMEM; 1818 goto out_unlock; 1819 } 1820 asoc = new_asoc; 1821 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1822 if (err < 0) { 1823 err = -ENOMEM; 1824 goto out_free; 1825 } 1826 1827 /* If the SCTP_INIT ancillary data is specified, set all 1828 * the association init values accordingly. 1829 */ 1830 if (sinit) { 1831 if (sinit->sinit_num_ostreams) { 1832 asoc->c.sinit_num_ostreams = 1833 sinit->sinit_num_ostreams; 1834 } 1835 if (sinit->sinit_max_instreams) { 1836 asoc->c.sinit_max_instreams = 1837 sinit->sinit_max_instreams; 1838 } 1839 if (sinit->sinit_max_attempts) { 1840 asoc->max_init_attempts 1841 = sinit->sinit_max_attempts; 1842 } 1843 if (sinit->sinit_max_init_timeo) { 1844 asoc->max_init_timeo = 1845 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1846 } 1847 } 1848 1849 /* Prime the peer's transport structures. */ 1850 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1851 if (!transport) { 1852 err = -ENOMEM; 1853 goto out_free; 1854 } 1855 } 1856 1857 /* ASSERT: we have a valid association at this point. */ 1858 pr_debug("%s: we have a valid association\n", __func__); 1859 1860 if (!sinfo) { 1861 /* If the user didn't specify SNDRCVINFO, make up one with 1862 * some defaults. 1863 */ 1864 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1865 default_sinfo.sinfo_stream = asoc->default_stream; 1866 default_sinfo.sinfo_flags = asoc->default_flags; 1867 default_sinfo.sinfo_ppid = asoc->default_ppid; 1868 default_sinfo.sinfo_context = asoc->default_context; 1869 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1870 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1871 sinfo = &default_sinfo; 1872 } 1873 1874 /* API 7.1.7, the sndbuf size per association bounds the 1875 * maximum size of data that can be sent in a single send call. 1876 */ 1877 if (msg_len > sk->sk_sndbuf) { 1878 err = -EMSGSIZE; 1879 goto out_free; 1880 } 1881 1882 if (asoc->pmtu_pending) 1883 sctp_assoc_pending_pmtu(sk, asoc); 1884 1885 /* If fragmentation is disabled and the message length exceeds the 1886 * association fragmentation point, return EMSGSIZE. The I-D 1887 * does not specify what this error is, but this looks like 1888 * a great fit. 1889 */ 1890 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1891 err = -EMSGSIZE; 1892 goto out_free; 1893 } 1894 1895 /* Check for invalid stream. */ 1896 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1897 err = -EINVAL; 1898 goto out_free; 1899 } 1900 1901 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1902 if (!sctp_wspace(asoc)) { 1903 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1904 if (err) 1905 goto out_free; 1906 } 1907 1908 /* If an address is passed with the sendto/sendmsg call, it is used 1909 * to override the primary destination address in the TCP model, or 1910 * when SCTP_ADDR_OVER flag is set in the UDP model. 1911 */ 1912 if ((sctp_style(sk, TCP) && msg_name) || 1913 (sinfo_flags & SCTP_ADDR_OVER)) { 1914 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1915 if (!chunk_tp) { 1916 err = -EINVAL; 1917 goto out_free; 1918 } 1919 } else 1920 chunk_tp = NULL; 1921 1922 /* Auto-connect, if we aren't connected already. */ 1923 if (sctp_state(asoc, CLOSED)) { 1924 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1925 if (err < 0) 1926 goto out_free; 1927 1928 pr_debug("%s: we associated primitively\n", __func__); 1929 } 1930 1931 /* Break the message into multiple chunks of maximum size. */ 1932 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1933 if (IS_ERR(datamsg)) { 1934 err = PTR_ERR(datamsg); 1935 goto out_free; 1936 } 1937 1938 /* Now send the (possibly) fragmented message. */ 1939 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1940 sctp_chunk_hold(chunk); 1941 1942 /* Do accounting for the write space. */ 1943 sctp_set_owner_w(chunk); 1944 1945 chunk->transport = chunk_tp; 1946 } 1947 1948 /* Send it to the lower layers. Note: all chunks 1949 * must either fail or succeed. The lower layer 1950 * works that way today. Keep it that way or this 1951 * breaks. 1952 */ 1953 err = sctp_primitive_SEND(net, asoc, datamsg); 1954 /* Did the lower layer accept the chunk? */ 1955 if (err) { 1956 sctp_datamsg_free(datamsg); 1957 goto out_free; 1958 } 1959 1960 pr_debug("%s: we sent primitively\n", __func__); 1961 1962 sctp_datamsg_put(datamsg); 1963 err = msg_len; 1964 1965 /* If we are already past ASSOCIATE, the lower 1966 * layers are responsible for association cleanup. 1967 */ 1968 goto out_unlock; 1969 1970 out_free: 1971 if (new_asoc) { 1972 sctp_unhash_established(asoc); 1973 sctp_association_free(asoc); 1974 } 1975 out_unlock: 1976 release_sock(sk); 1977 1978 out_nounlock: 1979 return sctp_error(sk, msg_flags, err); 1980 1981 #if 0 1982 do_sock_err: 1983 if (msg_len) 1984 err = msg_len; 1985 else 1986 err = sock_error(sk); 1987 goto out; 1988 1989 do_interrupted: 1990 if (msg_len) 1991 err = msg_len; 1992 goto out; 1993 #endif /* 0 */ 1994 } 1995 1996 /* This is an extended version of skb_pull() that removes the data from the 1997 * start of a skb even when data is spread across the list of skb's in the 1998 * frag_list. len specifies the total amount of data that needs to be removed. 1999 * when 'len' bytes could be removed from the skb, it returns 0. 2000 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2001 * could not be removed. 2002 */ 2003 static int sctp_skb_pull(struct sk_buff *skb, int len) 2004 { 2005 struct sk_buff *list; 2006 int skb_len = skb_headlen(skb); 2007 int rlen; 2008 2009 if (len <= skb_len) { 2010 __skb_pull(skb, len); 2011 return 0; 2012 } 2013 len -= skb_len; 2014 __skb_pull(skb, skb_len); 2015 2016 skb_walk_frags(skb, list) { 2017 rlen = sctp_skb_pull(list, len); 2018 skb->len -= (len-rlen); 2019 skb->data_len -= (len-rlen); 2020 2021 if (!rlen) 2022 return 0; 2023 2024 len = rlen; 2025 } 2026 2027 return len; 2028 } 2029 2030 /* API 3.1.3 recvmsg() - UDP Style Syntax 2031 * 2032 * ssize_t recvmsg(int socket, struct msghdr *message, 2033 * int flags); 2034 * 2035 * socket - the socket descriptor of the endpoint. 2036 * message - pointer to the msghdr structure which contains a single 2037 * user message and possibly some ancillary data. 2038 * 2039 * See Section 5 for complete description of the data 2040 * structures. 2041 * 2042 * flags - flags sent or received with the user message, see Section 2043 * 5 for complete description of the flags. 2044 */ 2045 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 2046 2047 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, 2048 struct msghdr *msg, size_t len, int noblock, 2049 int flags, int *addr_len) 2050 { 2051 struct sctp_ulpevent *event = NULL; 2052 struct sctp_sock *sp = sctp_sk(sk); 2053 struct sk_buff *skb; 2054 int copied; 2055 int err = 0; 2056 int skb_len; 2057 2058 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2059 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2060 addr_len); 2061 2062 lock_sock(sk); 2063 2064 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2065 err = -ENOTCONN; 2066 goto out; 2067 } 2068 2069 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2070 if (!skb) 2071 goto out; 2072 2073 /* Get the total length of the skb including any skb's in the 2074 * frag_list. 2075 */ 2076 skb_len = skb->len; 2077 2078 copied = skb_len; 2079 if (copied > len) 2080 copied = len; 2081 2082 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 2083 2084 event = sctp_skb2event(skb); 2085 2086 if (err) 2087 goto out_free; 2088 2089 sock_recv_ts_and_drops(msg, sk, skb); 2090 if (sctp_ulpevent_is_notification(event)) { 2091 msg->msg_flags |= MSG_NOTIFICATION; 2092 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2093 } else { 2094 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2095 } 2096 2097 /* Check if we allow SCTP_SNDRCVINFO. */ 2098 if (sp->subscribe.sctp_data_io_event) 2099 sctp_ulpevent_read_sndrcvinfo(event, msg); 2100 #if 0 2101 /* FIXME: we should be calling IP/IPv6 layers. */ 2102 if (sk->sk_protinfo.af_inet.cmsg_flags) 2103 ip_cmsg_recv(msg, skb); 2104 #endif 2105 2106 err = copied; 2107 2108 /* If skb's length exceeds the user's buffer, update the skb and 2109 * push it back to the receive_queue so that the next call to 2110 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2111 */ 2112 if (skb_len > copied) { 2113 msg->msg_flags &= ~MSG_EOR; 2114 if (flags & MSG_PEEK) 2115 goto out_free; 2116 sctp_skb_pull(skb, copied); 2117 skb_queue_head(&sk->sk_receive_queue, skb); 2118 2119 /* When only partial message is copied to the user, increase 2120 * rwnd by that amount. If all the data in the skb is read, 2121 * rwnd is updated when the event is freed. 2122 */ 2123 if (!sctp_ulpevent_is_notification(event)) 2124 sctp_assoc_rwnd_increase(event->asoc, copied); 2125 goto out; 2126 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2127 (event->msg_flags & MSG_EOR)) 2128 msg->msg_flags |= MSG_EOR; 2129 else 2130 msg->msg_flags &= ~MSG_EOR; 2131 2132 out_free: 2133 if (flags & MSG_PEEK) { 2134 /* Release the skb reference acquired after peeking the skb in 2135 * sctp_skb_recv_datagram(). 2136 */ 2137 kfree_skb(skb); 2138 } else { 2139 /* Free the event which includes releasing the reference to 2140 * the owner of the skb, freeing the skb and updating the 2141 * rwnd. 2142 */ 2143 sctp_ulpevent_free(event); 2144 } 2145 out: 2146 release_sock(sk); 2147 return err; 2148 } 2149 2150 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2151 * 2152 * This option is a on/off flag. If enabled no SCTP message 2153 * fragmentation will be performed. Instead if a message being sent 2154 * exceeds the current PMTU size, the message will NOT be sent and 2155 * instead a error will be indicated to the user. 2156 */ 2157 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2158 char __user *optval, 2159 unsigned int optlen) 2160 { 2161 int val; 2162 2163 if (optlen < sizeof(int)) 2164 return -EINVAL; 2165 2166 if (get_user(val, (int __user *)optval)) 2167 return -EFAULT; 2168 2169 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2170 2171 return 0; 2172 } 2173 2174 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2175 unsigned int optlen) 2176 { 2177 struct sctp_association *asoc; 2178 struct sctp_ulpevent *event; 2179 2180 if (optlen > sizeof(struct sctp_event_subscribe)) 2181 return -EINVAL; 2182 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2183 return -EFAULT; 2184 2185 /* 2186 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2187 * if there is no data to be sent or retransmit, the stack will 2188 * immediately send up this notification. 2189 */ 2190 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2191 &sctp_sk(sk)->subscribe)) { 2192 asoc = sctp_id2assoc(sk, 0); 2193 2194 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2195 event = sctp_ulpevent_make_sender_dry_event(asoc, 2196 GFP_ATOMIC); 2197 if (!event) 2198 return -ENOMEM; 2199 2200 sctp_ulpq_tail_event(&asoc->ulpq, event); 2201 } 2202 } 2203 2204 return 0; 2205 } 2206 2207 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2208 * 2209 * This socket option is applicable to the UDP-style socket only. When 2210 * set it will cause associations that are idle for more than the 2211 * specified number of seconds to automatically close. An association 2212 * being idle is defined an association that has NOT sent or received 2213 * user data. The special value of '0' indicates that no automatic 2214 * close of any associations should be performed. The option expects an 2215 * integer defining the number of seconds of idle time before an 2216 * association is closed. 2217 */ 2218 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2219 unsigned int optlen) 2220 { 2221 struct sctp_sock *sp = sctp_sk(sk); 2222 struct net *net = sock_net(sk); 2223 2224 /* Applicable to UDP-style socket only */ 2225 if (sctp_style(sk, TCP)) 2226 return -EOPNOTSUPP; 2227 if (optlen != sizeof(int)) 2228 return -EINVAL; 2229 if (copy_from_user(&sp->autoclose, optval, optlen)) 2230 return -EFAULT; 2231 2232 if (sp->autoclose > net->sctp.max_autoclose) 2233 sp->autoclose = net->sctp.max_autoclose; 2234 2235 return 0; 2236 } 2237 2238 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2239 * 2240 * Applications can enable or disable heartbeats for any peer address of 2241 * an association, modify an address's heartbeat interval, force a 2242 * heartbeat to be sent immediately, and adjust the address's maximum 2243 * number of retransmissions sent before an address is considered 2244 * unreachable. The following structure is used to access and modify an 2245 * address's parameters: 2246 * 2247 * struct sctp_paddrparams { 2248 * sctp_assoc_t spp_assoc_id; 2249 * struct sockaddr_storage spp_address; 2250 * uint32_t spp_hbinterval; 2251 * uint16_t spp_pathmaxrxt; 2252 * uint32_t spp_pathmtu; 2253 * uint32_t spp_sackdelay; 2254 * uint32_t spp_flags; 2255 * }; 2256 * 2257 * spp_assoc_id - (one-to-many style socket) This is filled in the 2258 * application, and identifies the association for 2259 * this query. 2260 * spp_address - This specifies which address is of interest. 2261 * spp_hbinterval - This contains the value of the heartbeat interval, 2262 * in milliseconds. If a value of zero 2263 * is present in this field then no changes are to 2264 * be made to this parameter. 2265 * spp_pathmaxrxt - This contains the maximum number of 2266 * retransmissions before this address shall be 2267 * considered unreachable. If a value of zero 2268 * is present in this field then no changes are to 2269 * be made to this parameter. 2270 * spp_pathmtu - When Path MTU discovery is disabled the value 2271 * specified here will be the "fixed" path mtu. 2272 * Note that if the spp_address field is empty 2273 * then all associations on this address will 2274 * have this fixed path mtu set upon them. 2275 * 2276 * spp_sackdelay - When delayed sack is enabled, this value specifies 2277 * the number of milliseconds that sacks will be delayed 2278 * for. This value will apply to all addresses of an 2279 * association if the spp_address field is empty. Note 2280 * also, that if delayed sack is enabled and this 2281 * value is set to 0, no change is made to the last 2282 * recorded delayed sack timer value. 2283 * 2284 * spp_flags - These flags are used to control various features 2285 * on an association. The flag field may contain 2286 * zero or more of the following options. 2287 * 2288 * SPP_HB_ENABLE - Enable heartbeats on the 2289 * specified address. Note that if the address 2290 * field is empty all addresses for the association 2291 * have heartbeats enabled upon them. 2292 * 2293 * SPP_HB_DISABLE - Disable heartbeats on the 2294 * speicifed address. Note that if the address 2295 * field is empty all addresses for the association 2296 * will have their heartbeats disabled. Note also 2297 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2298 * mutually exclusive, only one of these two should 2299 * be specified. Enabling both fields will have 2300 * undetermined results. 2301 * 2302 * SPP_HB_DEMAND - Request a user initiated heartbeat 2303 * to be made immediately. 2304 * 2305 * SPP_HB_TIME_IS_ZERO - Specify's that the time for 2306 * heartbeat delayis to be set to the value of 0 2307 * milliseconds. 2308 * 2309 * SPP_PMTUD_ENABLE - This field will enable PMTU 2310 * discovery upon the specified address. Note that 2311 * if the address feild is empty then all addresses 2312 * on the association are effected. 2313 * 2314 * SPP_PMTUD_DISABLE - This field will disable PMTU 2315 * discovery upon the specified address. Note that 2316 * if the address feild is empty then all addresses 2317 * on the association are effected. Not also that 2318 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2319 * exclusive. Enabling both will have undetermined 2320 * results. 2321 * 2322 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2323 * on delayed sack. The time specified in spp_sackdelay 2324 * is used to specify the sack delay for this address. Note 2325 * that if spp_address is empty then all addresses will 2326 * enable delayed sack and take on the sack delay 2327 * value specified in spp_sackdelay. 2328 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2329 * off delayed sack. If the spp_address field is blank then 2330 * delayed sack is disabled for the entire association. Note 2331 * also that this field is mutually exclusive to 2332 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2333 * results. 2334 */ 2335 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2336 struct sctp_transport *trans, 2337 struct sctp_association *asoc, 2338 struct sctp_sock *sp, 2339 int hb_change, 2340 int pmtud_change, 2341 int sackdelay_change) 2342 { 2343 int error; 2344 2345 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2346 struct net *net = sock_net(trans->asoc->base.sk); 2347 2348 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2349 if (error) 2350 return error; 2351 } 2352 2353 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2354 * this field is ignored. Note also that a value of zero indicates 2355 * the current setting should be left unchanged. 2356 */ 2357 if (params->spp_flags & SPP_HB_ENABLE) { 2358 2359 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2360 * set. This lets us use 0 value when this flag 2361 * is set. 2362 */ 2363 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2364 params->spp_hbinterval = 0; 2365 2366 if (params->spp_hbinterval || 2367 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2368 if (trans) { 2369 trans->hbinterval = 2370 msecs_to_jiffies(params->spp_hbinterval); 2371 } else if (asoc) { 2372 asoc->hbinterval = 2373 msecs_to_jiffies(params->spp_hbinterval); 2374 } else { 2375 sp->hbinterval = params->spp_hbinterval; 2376 } 2377 } 2378 } 2379 2380 if (hb_change) { 2381 if (trans) { 2382 trans->param_flags = 2383 (trans->param_flags & ~SPP_HB) | hb_change; 2384 } else if (asoc) { 2385 asoc->param_flags = 2386 (asoc->param_flags & ~SPP_HB) | hb_change; 2387 } else { 2388 sp->param_flags = 2389 (sp->param_flags & ~SPP_HB) | hb_change; 2390 } 2391 } 2392 2393 /* When Path MTU discovery is disabled the value specified here will 2394 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2395 * include the flag SPP_PMTUD_DISABLE for this field to have any 2396 * effect). 2397 */ 2398 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2399 if (trans) { 2400 trans->pathmtu = params->spp_pathmtu; 2401 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2402 } else if (asoc) { 2403 asoc->pathmtu = params->spp_pathmtu; 2404 sctp_frag_point(asoc, params->spp_pathmtu); 2405 } else { 2406 sp->pathmtu = params->spp_pathmtu; 2407 } 2408 } 2409 2410 if (pmtud_change) { 2411 if (trans) { 2412 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2413 (params->spp_flags & SPP_PMTUD_ENABLE); 2414 trans->param_flags = 2415 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2416 if (update) { 2417 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2418 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2419 } 2420 } else if (asoc) { 2421 asoc->param_flags = 2422 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2423 } else { 2424 sp->param_flags = 2425 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2426 } 2427 } 2428 2429 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2430 * value of this field is ignored. Note also that a value of zero 2431 * indicates the current setting should be left unchanged. 2432 */ 2433 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2434 if (trans) { 2435 trans->sackdelay = 2436 msecs_to_jiffies(params->spp_sackdelay); 2437 } else if (asoc) { 2438 asoc->sackdelay = 2439 msecs_to_jiffies(params->spp_sackdelay); 2440 } else { 2441 sp->sackdelay = params->spp_sackdelay; 2442 } 2443 } 2444 2445 if (sackdelay_change) { 2446 if (trans) { 2447 trans->param_flags = 2448 (trans->param_flags & ~SPP_SACKDELAY) | 2449 sackdelay_change; 2450 } else if (asoc) { 2451 asoc->param_flags = 2452 (asoc->param_flags & ~SPP_SACKDELAY) | 2453 sackdelay_change; 2454 } else { 2455 sp->param_flags = 2456 (sp->param_flags & ~SPP_SACKDELAY) | 2457 sackdelay_change; 2458 } 2459 } 2460 2461 /* Note that a value of zero indicates the current setting should be 2462 left unchanged. 2463 */ 2464 if (params->spp_pathmaxrxt) { 2465 if (trans) { 2466 trans->pathmaxrxt = params->spp_pathmaxrxt; 2467 } else if (asoc) { 2468 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2469 } else { 2470 sp->pathmaxrxt = params->spp_pathmaxrxt; 2471 } 2472 } 2473 2474 return 0; 2475 } 2476 2477 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2478 char __user *optval, 2479 unsigned int optlen) 2480 { 2481 struct sctp_paddrparams params; 2482 struct sctp_transport *trans = NULL; 2483 struct sctp_association *asoc = NULL; 2484 struct sctp_sock *sp = sctp_sk(sk); 2485 int error; 2486 int hb_change, pmtud_change, sackdelay_change; 2487 2488 if (optlen != sizeof(struct sctp_paddrparams)) 2489 return -EINVAL; 2490 2491 if (copy_from_user(¶ms, optval, optlen)) 2492 return -EFAULT; 2493 2494 /* Validate flags and value parameters. */ 2495 hb_change = params.spp_flags & SPP_HB; 2496 pmtud_change = params.spp_flags & SPP_PMTUD; 2497 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2498 2499 if (hb_change == SPP_HB || 2500 pmtud_change == SPP_PMTUD || 2501 sackdelay_change == SPP_SACKDELAY || 2502 params.spp_sackdelay > 500 || 2503 (params.spp_pathmtu && 2504 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2505 return -EINVAL; 2506 2507 /* If an address other than INADDR_ANY is specified, and 2508 * no transport is found, then the request is invalid. 2509 */ 2510 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 2511 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 2512 params.spp_assoc_id); 2513 if (!trans) 2514 return -EINVAL; 2515 } 2516 2517 /* Get association, if assoc_id != 0 and the socket is a one 2518 * to many style socket, and an association was not found, then 2519 * the id was invalid. 2520 */ 2521 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2522 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2523 return -EINVAL; 2524 2525 /* Heartbeat demand can only be sent on a transport or 2526 * association, but not a socket. 2527 */ 2528 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2529 return -EINVAL; 2530 2531 /* Process parameters. */ 2532 error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2533 hb_change, pmtud_change, 2534 sackdelay_change); 2535 2536 if (error) 2537 return error; 2538 2539 /* If changes are for association, also apply parameters to each 2540 * transport. 2541 */ 2542 if (!trans && asoc) { 2543 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2544 transports) { 2545 sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2546 hb_change, pmtud_change, 2547 sackdelay_change); 2548 } 2549 } 2550 2551 return 0; 2552 } 2553 2554 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2555 { 2556 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2557 } 2558 2559 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2560 { 2561 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2562 } 2563 2564 /* 2565 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2566 * 2567 * This option will effect the way delayed acks are performed. This 2568 * option allows you to get or set the delayed ack time, in 2569 * milliseconds. It also allows changing the delayed ack frequency. 2570 * Changing the frequency to 1 disables the delayed sack algorithm. If 2571 * the assoc_id is 0, then this sets or gets the endpoints default 2572 * values. If the assoc_id field is non-zero, then the set or get 2573 * effects the specified association for the one to many model (the 2574 * assoc_id field is ignored by the one to one model). Note that if 2575 * sack_delay or sack_freq are 0 when setting this option, then the 2576 * current values will remain unchanged. 2577 * 2578 * struct sctp_sack_info { 2579 * sctp_assoc_t sack_assoc_id; 2580 * uint32_t sack_delay; 2581 * uint32_t sack_freq; 2582 * }; 2583 * 2584 * sack_assoc_id - This parameter, indicates which association the user 2585 * is performing an action upon. Note that if this field's value is 2586 * zero then the endpoints default value is changed (effecting future 2587 * associations only). 2588 * 2589 * sack_delay - This parameter contains the number of milliseconds that 2590 * the user is requesting the delayed ACK timer be set to. Note that 2591 * this value is defined in the standard to be between 200 and 500 2592 * milliseconds. 2593 * 2594 * sack_freq - This parameter contains the number of packets that must 2595 * be received before a sack is sent without waiting for the delay 2596 * timer to expire. The default value for this is 2, setting this 2597 * value to 1 will disable the delayed sack algorithm. 2598 */ 2599 2600 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2601 char __user *optval, unsigned int optlen) 2602 { 2603 struct sctp_sack_info params; 2604 struct sctp_transport *trans = NULL; 2605 struct sctp_association *asoc = NULL; 2606 struct sctp_sock *sp = sctp_sk(sk); 2607 2608 if (optlen == sizeof(struct sctp_sack_info)) { 2609 if (copy_from_user(¶ms, optval, optlen)) 2610 return -EFAULT; 2611 2612 if (params.sack_delay == 0 && params.sack_freq == 0) 2613 return 0; 2614 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2615 pr_warn_ratelimited(DEPRECATED 2616 "%s (pid %d) " 2617 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2618 "Use struct sctp_sack_info instead\n", 2619 current->comm, task_pid_nr(current)); 2620 if (copy_from_user(¶ms, optval, optlen)) 2621 return -EFAULT; 2622 2623 if (params.sack_delay == 0) 2624 params.sack_freq = 1; 2625 else 2626 params.sack_freq = 0; 2627 } else 2628 return -EINVAL; 2629 2630 /* Validate value parameter. */ 2631 if (params.sack_delay > 500) 2632 return -EINVAL; 2633 2634 /* Get association, if sack_assoc_id != 0 and the socket is a one 2635 * to many style socket, and an association was not found, then 2636 * the id was invalid. 2637 */ 2638 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2639 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2640 return -EINVAL; 2641 2642 if (params.sack_delay) { 2643 if (asoc) { 2644 asoc->sackdelay = 2645 msecs_to_jiffies(params.sack_delay); 2646 asoc->param_flags = 2647 sctp_spp_sackdelay_enable(asoc->param_flags); 2648 } else { 2649 sp->sackdelay = params.sack_delay; 2650 sp->param_flags = 2651 sctp_spp_sackdelay_enable(sp->param_flags); 2652 } 2653 } 2654 2655 if (params.sack_freq == 1) { 2656 if (asoc) { 2657 asoc->param_flags = 2658 sctp_spp_sackdelay_disable(asoc->param_flags); 2659 } else { 2660 sp->param_flags = 2661 sctp_spp_sackdelay_disable(sp->param_flags); 2662 } 2663 } else if (params.sack_freq > 1) { 2664 if (asoc) { 2665 asoc->sackfreq = params.sack_freq; 2666 asoc->param_flags = 2667 sctp_spp_sackdelay_enable(asoc->param_flags); 2668 } else { 2669 sp->sackfreq = params.sack_freq; 2670 sp->param_flags = 2671 sctp_spp_sackdelay_enable(sp->param_flags); 2672 } 2673 } 2674 2675 /* If change is for association, also apply to each transport. */ 2676 if (asoc) { 2677 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2678 transports) { 2679 if (params.sack_delay) { 2680 trans->sackdelay = 2681 msecs_to_jiffies(params.sack_delay); 2682 trans->param_flags = 2683 sctp_spp_sackdelay_enable(trans->param_flags); 2684 } 2685 if (params.sack_freq == 1) { 2686 trans->param_flags = 2687 sctp_spp_sackdelay_disable(trans->param_flags); 2688 } else if (params.sack_freq > 1) { 2689 trans->sackfreq = params.sack_freq; 2690 trans->param_flags = 2691 sctp_spp_sackdelay_enable(trans->param_flags); 2692 } 2693 } 2694 } 2695 2696 return 0; 2697 } 2698 2699 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2700 * 2701 * Applications can specify protocol parameters for the default association 2702 * initialization. The option name argument to setsockopt() and getsockopt() 2703 * is SCTP_INITMSG. 2704 * 2705 * Setting initialization parameters is effective only on an unconnected 2706 * socket (for UDP-style sockets only future associations are effected 2707 * by the change). With TCP-style sockets, this option is inherited by 2708 * sockets derived from a listener socket. 2709 */ 2710 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2711 { 2712 struct sctp_initmsg sinit; 2713 struct sctp_sock *sp = sctp_sk(sk); 2714 2715 if (optlen != sizeof(struct sctp_initmsg)) 2716 return -EINVAL; 2717 if (copy_from_user(&sinit, optval, optlen)) 2718 return -EFAULT; 2719 2720 if (sinit.sinit_num_ostreams) 2721 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2722 if (sinit.sinit_max_instreams) 2723 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2724 if (sinit.sinit_max_attempts) 2725 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2726 if (sinit.sinit_max_init_timeo) 2727 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2728 2729 return 0; 2730 } 2731 2732 /* 2733 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2734 * 2735 * Applications that wish to use the sendto() system call may wish to 2736 * specify a default set of parameters that would normally be supplied 2737 * through the inclusion of ancillary data. This socket option allows 2738 * such an application to set the default sctp_sndrcvinfo structure. 2739 * The application that wishes to use this socket option simply passes 2740 * in to this call the sctp_sndrcvinfo structure defined in Section 2741 * 5.2.2) The input parameters accepted by this call include 2742 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2743 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2744 * to this call if the caller is using the UDP model. 2745 */ 2746 static int sctp_setsockopt_default_send_param(struct sock *sk, 2747 char __user *optval, 2748 unsigned int optlen) 2749 { 2750 struct sctp_sndrcvinfo info; 2751 struct sctp_association *asoc; 2752 struct sctp_sock *sp = sctp_sk(sk); 2753 2754 if (optlen != sizeof(struct sctp_sndrcvinfo)) 2755 return -EINVAL; 2756 if (copy_from_user(&info, optval, optlen)) 2757 return -EFAULT; 2758 2759 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2760 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2761 return -EINVAL; 2762 2763 if (asoc) { 2764 asoc->default_stream = info.sinfo_stream; 2765 asoc->default_flags = info.sinfo_flags; 2766 asoc->default_ppid = info.sinfo_ppid; 2767 asoc->default_context = info.sinfo_context; 2768 asoc->default_timetolive = info.sinfo_timetolive; 2769 } else { 2770 sp->default_stream = info.sinfo_stream; 2771 sp->default_flags = info.sinfo_flags; 2772 sp->default_ppid = info.sinfo_ppid; 2773 sp->default_context = info.sinfo_context; 2774 sp->default_timetolive = info.sinfo_timetolive; 2775 } 2776 2777 return 0; 2778 } 2779 2780 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2781 * 2782 * Requests that the local SCTP stack use the enclosed peer address as 2783 * the association primary. The enclosed address must be one of the 2784 * association peer's addresses. 2785 */ 2786 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2787 unsigned int optlen) 2788 { 2789 struct sctp_prim prim; 2790 struct sctp_transport *trans; 2791 2792 if (optlen != sizeof(struct sctp_prim)) 2793 return -EINVAL; 2794 2795 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2796 return -EFAULT; 2797 2798 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2799 if (!trans) 2800 return -EINVAL; 2801 2802 sctp_assoc_set_primary(trans->asoc, trans); 2803 2804 return 0; 2805 } 2806 2807 /* 2808 * 7.1.5 SCTP_NODELAY 2809 * 2810 * Turn on/off any Nagle-like algorithm. This means that packets are 2811 * generally sent as soon as possible and no unnecessary delays are 2812 * introduced, at the cost of more packets in the network. Expects an 2813 * integer boolean flag. 2814 */ 2815 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2816 unsigned int optlen) 2817 { 2818 int val; 2819 2820 if (optlen < sizeof(int)) 2821 return -EINVAL; 2822 if (get_user(val, (int __user *)optval)) 2823 return -EFAULT; 2824 2825 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2826 return 0; 2827 } 2828 2829 /* 2830 * 2831 * 7.1.1 SCTP_RTOINFO 2832 * 2833 * The protocol parameters used to initialize and bound retransmission 2834 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2835 * and modify these parameters. 2836 * All parameters are time values, in milliseconds. A value of 0, when 2837 * modifying the parameters, indicates that the current value should not 2838 * be changed. 2839 * 2840 */ 2841 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2842 { 2843 struct sctp_rtoinfo rtoinfo; 2844 struct sctp_association *asoc; 2845 unsigned long rto_min, rto_max; 2846 struct sctp_sock *sp = sctp_sk(sk); 2847 2848 if (optlen != sizeof (struct sctp_rtoinfo)) 2849 return -EINVAL; 2850 2851 if (copy_from_user(&rtoinfo, optval, optlen)) 2852 return -EFAULT; 2853 2854 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2855 2856 /* Set the values to the specific association */ 2857 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2858 return -EINVAL; 2859 2860 rto_max = rtoinfo.srto_max; 2861 rto_min = rtoinfo.srto_min; 2862 2863 if (rto_max) 2864 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2865 else 2866 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2867 2868 if (rto_min) 2869 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2870 else 2871 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2872 2873 if (rto_min > rto_max) 2874 return -EINVAL; 2875 2876 if (asoc) { 2877 if (rtoinfo.srto_initial != 0) 2878 asoc->rto_initial = 2879 msecs_to_jiffies(rtoinfo.srto_initial); 2880 asoc->rto_max = rto_max; 2881 asoc->rto_min = rto_min; 2882 } else { 2883 /* If there is no association or the association-id = 0 2884 * set the values to the endpoint. 2885 */ 2886 if (rtoinfo.srto_initial != 0) 2887 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2888 sp->rtoinfo.srto_max = rto_max; 2889 sp->rtoinfo.srto_min = rto_min; 2890 } 2891 2892 return 0; 2893 } 2894 2895 /* 2896 * 2897 * 7.1.2 SCTP_ASSOCINFO 2898 * 2899 * This option is used to tune the maximum retransmission attempts 2900 * of the association. 2901 * Returns an error if the new association retransmission value is 2902 * greater than the sum of the retransmission value of the peer. 2903 * See [SCTP] for more information. 2904 * 2905 */ 2906 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2907 { 2908 2909 struct sctp_assocparams assocparams; 2910 struct sctp_association *asoc; 2911 2912 if (optlen != sizeof(struct sctp_assocparams)) 2913 return -EINVAL; 2914 if (copy_from_user(&assocparams, optval, optlen)) 2915 return -EFAULT; 2916 2917 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2918 2919 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2920 return -EINVAL; 2921 2922 /* Set the values to the specific association */ 2923 if (asoc) { 2924 if (assocparams.sasoc_asocmaxrxt != 0) { 2925 __u32 path_sum = 0; 2926 int paths = 0; 2927 struct sctp_transport *peer_addr; 2928 2929 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2930 transports) { 2931 path_sum += peer_addr->pathmaxrxt; 2932 paths++; 2933 } 2934 2935 /* Only validate asocmaxrxt if we have more than 2936 * one path/transport. We do this because path 2937 * retransmissions are only counted when we have more 2938 * then one path. 2939 */ 2940 if (paths > 1 && 2941 assocparams.sasoc_asocmaxrxt > path_sum) 2942 return -EINVAL; 2943 2944 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 2945 } 2946 2947 if (assocparams.sasoc_cookie_life != 0) 2948 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 2949 } else { 2950 /* Set the values to the endpoint */ 2951 struct sctp_sock *sp = sctp_sk(sk); 2952 2953 if (assocparams.sasoc_asocmaxrxt != 0) 2954 sp->assocparams.sasoc_asocmaxrxt = 2955 assocparams.sasoc_asocmaxrxt; 2956 if (assocparams.sasoc_cookie_life != 0) 2957 sp->assocparams.sasoc_cookie_life = 2958 assocparams.sasoc_cookie_life; 2959 } 2960 return 0; 2961 } 2962 2963 /* 2964 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 2965 * 2966 * This socket option is a boolean flag which turns on or off mapped V4 2967 * addresses. If this option is turned on and the socket is type 2968 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 2969 * If this option is turned off, then no mapping will be done of V4 2970 * addresses and a user will receive both PF_INET6 and PF_INET type 2971 * addresses on the socket. 2972 */ 2973 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 2974 { 2975 int val; 2976 struct sctp_sock *sp = sctp_sk(sk); 2977 2978 if (optlen < sizeof(int)) 2979 return -EINVAL; 2980 if (get_user(val, (int __user *)optval)) 2981 return -EFAULT; 2982 if (val) 2983 sp->v4mapped = 1; 2984 else 2985 sp->v4mapped = 0; 2986 2987 return 0; 2988 } 2989 2990 /* 2991 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 2992 * This option will get or set the maximum size to put in any outgoing 2993 * SCTP DATA chunk. If a message is larger than this size it will be 2994 * fragmented by SCTP into the specified size. Note that the underlying 2995 * SCTP implementation may fragment into smaller sized chunks when the 2996 * PMTU of the underlying association is smaller than the value set by 2997 * the user. The default value for this option is '0' which indicates 2998 * the user is NOT limiting fragmentation and only the PMTU will effect 2999 * SCTP's choice of DATA chunk size. Note also that values set larger 3000 * than the maximum size of an IP datagram will effectively let SCTP 3001 * control fragmentation (i.e. the same as setting this option to 0). 3002 * 3003 * The following structure is used to access and modify this parameter: 3004 * 3005 * struct sctp_assoc_value { 3006 * sctp_assoc_t assoc_id; 3007 * uint32_t assoc_value; 3008 * }; 3009 * 3010 * assoc_id: This parameter is ignored for one-to-one style sockets. 3011 * For one-to-many style sockets this parameter indicates which 3012 * association the user is performing an action upon. Note that if 3013 * this field's value is zero then the endpoints default value is 3014 * changed (effecting future associations only). 3015 * assoc_value: This parameter specifies the maximum size in bytes. 3016 */ 3017 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3018 { 3019 struct sctp_assoc_value params; 3020 struct sctp_association *asoc; 3021 struct sctp_sock *sp = sctp_sk(sk); 3022 int val; 3023 3024 if (optlen == sizeof(int)) { 3025 pr_warn_ratelimited(DEPRECATED 3026 "%s (pid %d) " 3027 "Use of int in maxseg socket option.\n" 3028 "Use struct sctp_assoc_value instead\n", 3029 current->comm, task_pid_nr(current)); 3030 if (copy_from_user(&val, optval, optlen)) 3031 return -EFAULT; 3032 params.assoc_id = 0; 3033 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3034 if (copy_from_user(¶ms, optval, optlen)) 3035 return -EFAULT; 3036 val = params.assoc_value; 3037 } else 3038 return -EINVAL; 3039 3040 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3041 return -EINVAL; 3042 3043 asoc = sctp_id2assoc(sk, params.assoc_id); 3044 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3045 return -EINVAL; 3046 3047 if (asoc) { 3048 if (val == 0) { 3049 val = asoc->pathmtu; 3050 val -= sp->pf->af->net_header_len; 3051 val -= sizeof(struct sctphdr) + 3052 sizeof(struct sctp_data_chunk); 3053 } 3054 asoc->user_frag = val; 3055 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3056 } else { 3057 sp->user_frag = val; 3058 } 3059 3060 return 0; 3061 } 3062 3063 3064 /* 3065 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3066 * 3067 * Requests that the peer mark the enclosed address as the association 3068 * primary. The enclosed address must be one of the association's 3069 * locally bound addresses. The following structure is used to make a 3070 * set primary request: 3071 */ 3072 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3073 unsigned int optlen) 3074 { 3075 struct net *net = sock_net(sk); 3076 struct sctp_sock *sp; 3077 struct sctp_association *asoc = NULL; 3078 struct sctp_setpeerprim prim; 3079 struct sctp_chunk *chunk; 3080 struct sctp_af *af; 3081 int err; 3082 3083 sp = sctp_sk(sk); 3084 3085 if (!net->sctp.addip_enable) 3086 return -EPERM; 3087 3088 if (optlen != sizeof(struct sctp_setpeerprim)) 3089 return -EINVAL; 3090 3091 if (copy_from_user(&prim, optval, optlen)) 3092 return -EFAULT; 3093 3094 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3095 if (!asoc) 3096 return -EINVAL; 3097 3098 if (!asoc->peer.asconf_capable) 3099 return -EPERM; 3100 3101 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3102 return -EPERM; 3103 3104 if (!sctp_state(asoc, ESTABLISHED)) 3105 return -ENOTCONN; 3106 3107 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3108 if (!af) 3109 return -EINVAL; 3110 3111 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3112 return -EADDRNOTAVAIL; 3113 3114 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3115 return -EADDRNOTAVAIL; 3116 3117 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3118 chunk = sctp_make_asconf_set_prim(asoc, 3119 (union sctp_addr *)&prim.sspp_addr); 3120 if (!chunk) 3121 return -ENOMEM; 3122 3123 err = sctp_send_asconf(asoc, chunk); 3124 3125 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3126 3127 return err; 3128 } 3129 3130 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3131 unsigned int optlen) 3132 { 3133 struct sctp_setadaptation adaptation; 3134 3135 if (optlen != sizeof(struct sctp_setadaptation)) 3136 return -EINVAL; 3137 if (copy_from_user(&adaptation, optval, optlen)) 3138 return -EFAULT; 3139 3140 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3141 3142 return 0; 3143 } 3144 3145 /* 3146 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3147 * 3148 * The context field in the sctp_sndrcvinfo structure is normally only 3149 * used when a failed message is retrieved holding the value that was 3150 * sent down on the actual send call. This option allows the setting of 3151 * a default context on an association basis that will be received on 3152 * reading messages from the peer. This is especially helpful in the 3153 * one-2-many model for an application to keep some reference to an 3154 * internal state machine that is processing messages on the 3155 * association. Note that the setting of this value only effects 3156 * received messages from the peer and does not effect the value that is 3157 * saved with outbound messages. 3158 */ 3159 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3160 unsigned int optlen) 3161 { 3162 struct sctp_assoc_value params; 3163 struct sctp_sock *sp; 3164 struct sctp_association *asoc; 3165 3166 if (optlen != sizeof(struct sctp_assoc_value)) 3167 return -EINVAL; 3168 if (copy_from_user(¶ms, optval, optlen)) 3169 return -EFAULT; 3170 3171 sp = sctp_sk(sk); 3172 3173 if (params.assoc_id != 0) { 3174 asoc = sctp_id2assoc(sk, params.assoc_id); 3175 if (!asoc) 3176 return -EINVAL; 3177 asoc->default_rcv_context = params.assoc_value; 3178 } else { 3179 sp->default_rcv_context = params.assoc_value; 3180 } 3181 3182 return 0; 3183 } 3184 3185 /* 3186 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3187 * 3188 * This options will at a minimum specify if the implementation is doing 3189 * fragmented interleave. Fragmented interleave, for a one to many 3190 * socket, is when subsequent calls to receive a message may return 3191 * parts of messages from different associations. Some implementations 3192 * may allow you to turn this value on or off. If so, when turned off, 3193 * no fragment interleave will occur (which will cause a head of line 3194 * blocking amongst multiple associations sharing the same one to many 3195 * socket). When this option is turned on, then each receive call may 3196 * come from a different association (thus the user must receive data 3197 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3198 * association each receive belongs to. 3199 * 3200 * This option takes a boolean value. A non-zero value indicates that 3201 * fragmented interleave is on. A value of zero indicates that 3202 * fragmented interleave is off. 3203 * 3204 * Note that it is important that an implementation that allows this 3205 * option to be turned on, have it off by default. Otherwise an unaware 3206 * application using the one to many model may become confused and act 3207 * incorrectly. 3208 */ 3209 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3210 char __user *optval, 3211 unsigned int optlen) 3212 { 3213 int val; 3214 3215 if (optlen != sizeof(int)) 3216 return -EINVAL; 3217 if (get_user(val, (int __user *)optval)) 3218 return -EFAULT; 3219 3220 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3221 3222 return 0; 3223 } 3224 3225 /* 3226 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3227 * (SCTP_PARTIAL_DELIVERY_POINT) 3228 * 3229 * This option will set or get the SCTP partial delivery point. This 3230 * point is the size of a message where the partial delivery API will be 3231 * invoked to help free up rwnd space for the peer. Setting this to a 3232 * lower value will cause partial deliveries to happen more often. The 3233 * calls argument is an integer that sets or gets the partial delivery 3234 * point. Note also that the call will fail if the user attempts to set 3235 * this value larger than the socket receive buffer size. 3236 * 3237 * Note that any single message having a length smaller than or equal to 3238 * the SCTP partial delivery point will be delivered in one single read 3239 * call as long as the user provided buffer is large enough to hold the 3240 * message. 3241 */ 3242 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3243 char __user *optval, 3244 unsigned int optlen) 3245 { 3246 u32 val; 3247 3248 if (optlen != sizeof(u32)) 3249 return -EINVAL; 3250 if (get_user(val, (int __user *)optval)) 3251 return -EFAULT; 3252 3253 /* Note: We double the receive buffer from what the user sets 3254 * it to be, also initial rwnd is based on rcvbuf/2. 3255 */ 3256 if (val > (sk->sk_rcvbuf >> 1)) 3257 return -EINVAL; 3258 3259 sctp_sk(sk)->pd_point = val; 3260 3261 return 0; /* is this the right error code? */ 3262 } 3263 3264 /* 3265 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3266 * 3267 * This option will allow a user to change the maximum burst of packets 3268 * that can be emitted by this association. Note that the default value 3269 * is 4, and some implementations may restrict this setting so that it 3270 * can only be lowered. 3271 * 3272 * NOTE: This text doesn't seem right. Do this on a socket basis with 3273 * future associations inheriting the socket value. 3274 */ 3275 static int sctp_setsockopt_maxburst(struct sock *sk, 3276 char __user *optval, 3277 unsigned int optlen) 3278 { 3279 struct sctp_assoc_value params; 3280 struct sctp_sock *sp; 3281 struct sctp_association *asoc; 3282 int val; 3283 int assoc_id = 0; 3284 3285 if (optlen == sizeof(int)) { 3286 pr_warn_ratelimited(DEPRECATED 3287 "%s (pid %d) " 3288 "Use of int in max_burst socket option deprecated.\n" 3289 "Use struct sctp_assoc_value instead\n", 3290 current->comm, task_pid_nr(current)); 3291 if (copy_from_user(&val, optval, optlen)) 3292 return -EFAULT; 3293 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3294 if (copy_from_user(¶ms, optval, optlen)) 3295 return -EFAULT; 3296 val = params.assoc_value; 3297 assoc_id = params.assoc_id; 3298 } else 3299 return -EINVAL; 3300 3301 sp = sctp_sk(sk); 3302 3303 if (assoc_id != 0) { 3304 asoc = sctp_id2assoc(sk, assoc_id); 3305 if (!asoc) 3306 return -EINVAL; 3307 asoc->max_burst = val; 3308 } else 3309 sp->max_burst = val; 3310 3311 return 0; 3312 } 3313 3314 /* 3315 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3316 * 3317 * This set option adds a chunk type that the user is requesting to be 3318 * received only in an authenticated way. Changes to the list of chunks 3319 * will only effect future associations on the socket. 3320 */ 3321 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3322 char __user *optval, 3323 unsigned int optlen) 3324 { 3325 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3326 struct sctp_authchunk val; 3327 3328 if (!ep->auth_enable) 3329 return -EACCES; 3330 3331 if (optlen != sizeof(struct sctp_authchunk)) 3332 return -EINVAL; 3333 if (copy_from_user(&val, optval, optlen)) 3334 return -EFAULT; 3335 3336 switch (val.sauth_chunk) { 3337 case SCTP_CID_INIT: 3338 case SCTP_CID_INIT_ACK: 3339 case SCTP_CID_SHUTDOWN_COMPLETE: 3340 case SCTP_CID_AUTH: 3341 return -EINVAL; 3342 } 3343 3344 /* add this chunk id to the endpoint */ 3345 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3346 } 3347 3348 /* 3349 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3350 * 3351 * This option gets or sets the list of HMAC algorithms that the local 3352 * endpoint requires the peer to use. 3353 */ 3354 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3355 char __user *optval, 3356 unsigned int optlen) 3357 { 3358 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3359 struct sctp_hmacalgo *hmacs; 3360 u32 idents; 3361 int err; 3362 3363 if (!ep->auth_enable) 3364 return -EACCES; 3365 3366 if (optlen < sizeof(struct sctp_hmacalgo)) 3367 return -EINVAL; 3368 3369 hmacs = memdup_user(optval, optlen); 3370 if (IS_ERR(hmacs)) 3371 return PTR_ERR(hmacs); 3372 3373 idents = hmacs->shmac_num_idents; 3374 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3375 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3376 err = -EINVAL; 3377 goto out; 3378 } 3379 3380 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3381 out: 3382 kfree(hmacs); 3383 return err; 3384 } 3385 3386 /* 3387 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3388 * 3389 * This option will set a shared secret key which is used to build an 3390 * association shared key. 3391 */ 3392 static int sctp_setsockopt_auth_key(struct sock *sk, 3393 char __user *optval, 3394 unsigned int optlen) 3395 { 3396 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3397 struct sctp_authkey *authkey; 3398 struct sctp_association *asoc; 3399 int ret; 3400 3401 if (!ep->auth_enable) 3402 return -EACCES; 3403 3404 if (optlen <= sizeof(struct sctp_authkey)) 3405 return -EINVAL; 3406 3407 authkey = memdup_user(optval, optlen); 3408 if (IS_ERR(authkey)) 3409 return PTR_ERR(authkey); 3410 3411 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3412 ret = -EINVAL; 3413 goto out; 3414 } 3415 3416 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3417 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3418 ret = -EINVAL; 3419 goto out; 3420 } 3421 3422 ret = sctp_auth_set_key(ep, asoc, authkey); 3423 out: 3424 kzfree(authkey); 3425 return ret; 3426 } 3427 3428 /* 3429 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3430 * 3431 * This option will get or set the active shared key to be used to build 3432 * the association shared key. 3433 */ 3434 static int sctp_setsockopt_active_key(struct sock *sk, 3435 char __user *optval, 3436 unsigned int optlen) 3437 { 3438 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3439 struct sctp_authkeyid val; 3440 struct sctp_association *asoc; 3441 3442 if (!ep->auth_enable) 3443 return -EACCES; 3444 3445 if (optlen != sizeof(struct sctp_authkeyid)) 3446 return -EINVAL; 3447 if (copy_from_user(&val, optval, optlen)) 3448 return -EFAULT; 3449 3450 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3451 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3452 return -EINVAL; 3453 3454 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3455 } 3456 3457 /* 3458 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3459 * 3460 * This set option will delete a shared secret key from use. 3461 */ 3462 static int sctp_setsockopt_del_key(struct sock *sk, 3463 char __user *optval, 3464 unsigned int optlen) 3465 { 3466 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3467 struct sctp_authkeyid val; 3468 struct sctp_association *asoc; 3469 3470 if (!ep->auth_enable) 3471 return -EACCES; 3472 3473 if (optlen != sizeof(struct sctp_authkeyid)) 3474 return -EINVAL; 3475 if (copy_from_user(&val, optval, optlen)) 3476 return -EFAULT; 3477 3478 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3479 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3480 return -EINVAL; 3481 3482 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3483 3484 } 3485 3486 /* 3487 * 8.1.23 SCTP_AUTO_ASCONF 3488 * 3489 * This option will enable or disable the use of the automatic generation of 3490 * ASCONF chunks to add and delete addresses to an existing association. Note 3491 * that this option has two caveats namely: a) it only affects sockets that 3492 * are bound to all addresses available to the SCTP stack, and b) the system 3493 * administrator may have an overriding control that turns the ASCONF feature 3494 * off no matter what setting the socket option may have. 3495 * This option expects an integer boolean flag, where a non-zero value turns on 3496 * the option, and a zero value turns off the option. 3497 * Note. In this implementation, socket operation overrides default parameter 3498 * being set by sysctl as well as FreeBSD implementation 3499 */ 3500 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3501 unsigned int optlen) 3502 { 3503 int val; 3504 struct sctp_sock *sp = sctp_sk(sk); 3505 3506 if (optlen < sizeof(int)) 3507 return -EINVAL; 3508 if (get_user(val, (int __user *)optval)) 3509 return -EFAULT; 3510 if (!sctp_is_ep_boundall(sk) && val) 3511 return -EINVAL; 3512 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3513 return 0; 3514 3515 if (val == 0 && sp->do_auto_asconf) { 3516 list_del(&sp->auto_asconf_list); 3517 sp->do_auto_asconf = 0; 3518 } else if (val && !sp->do_auto_asconf) { 3519 list_add_tail(&sp->auto_asconf_list, 3520 &sock_net(sk)->sctp.auto_asconf_splist); 3521 sp->do_auto_asconf = 1; 3522 } 3523 return 0; 3524 } 3525 3526 3527 /* 3528 * SCTP_PEER_ADDR_THLDS 3529 * 3530 * This option allows us to alter the partially failed threshold for one or all 3531 * transports in an association. See Section 6.1 of: 3532 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3533 */ 3534 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3535 char __user *optval, 3536 unsigned int optlen) 3537 { 3538 struct sctp_paddrthlds val; 3539 struct sctp_transport *trans; 3540 struct sctp_association *asoc; 3541 3542 if (optlen < sizeof(struct sctp_paddrthlds)) 3543 return -EINVAL; 3544 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3545 sizeof(struct sctp_paddrthlds))) 3546 return -EFAULT; 3547 3548 3549 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3550 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3551 if (!asoc) 3552 return -ENOENT; 3553 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3554 transports) { 3555 if (val.spt_pathmaxrxt) 3556 trans->pathmaxrxt = val.spt_pathmaxrxt; 3557 trans->pf_retrans = val.spt_pathpfthld; 3558 } 3559 3560 if (val.spt_pathmaxrxt) 3561 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3562 asoc->pf_retrans = val.spt_pathpfthld; 3563 } else { 3564 trans = sctp_addr_id2transport(sk, &val.spt_address, 3565 val.spt_assoc_id); 3566 if (!trans) 3567 return -ENOENT; 3568 3569 if (val.spt_pathmaxrxt) 3570 trans->pathmaxrxt = val.spt_pathmaxrxt; 3571 trans->pf_retrans = val.spt_pathpfthld; 3572 } 3573 3574 return 0; 3575 } 3576 3577 /* API 6.2 setsockopt(), getsockopt() 3578 * 3579 * Applications use setsockopt() and getsockopt() to set or retrieve 3580 * socket options. Socket options are used to change the default 3581 * behavior of sockets calls. They are described in Section 7. 3582 * 3583 * The syntax is: 3584 * 3585 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3586 * int __user *optlen); 3587 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3588 * int optlen); 3589 * 3590 * sd - the socket descript. 3591 * level - set to IPPROTO_SCTP for all SCTP options. 3592 * optname - the option name. 3593 * optval - the buffer to store the value of the option. 3594 * optlen - the size of the buffer. 3595 */ 3596 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3597 char __user *optval, unsigned int optlen) 3598 { 3599 int retval = 0; 3600 3601 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3602 3603 /* I can hardly begin to describe how wrong this is. This is 3604 * so broken as to be worse than useless. The API draft 3605 * REALLY is NOT helpful here... I am not convinced that the 3606 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3607 * are at all well-founded. 3608 */ 3609 if (level != SOL_SCTP) { 3610 struct sctp_af *af = sctp_sk(sk)->pf->af; 3611 retval = af->setsockopt(sk, level, optname, optval, optlen); 3612 goto out_nounlock; 3613 } 3614 3615 lock_sock(sk); 3616 3617 switch (optname) { 3618 case SCTP_SOCKOPT_BINDX_ADD: 3619 /* 'optlen' is the size of the addresses buffer. */ 3620 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3621 optlen, SCTP_BINDX_ADD_ADDR); 3622 break; 3623 3624 case SCTP_SOCKOPT_BINDX_REM: 3625 /* 'optlen' is the size of the addresses buffer. */ 3626 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3627 optlen, SCTP_BINDX_REM_ADDR); 3628 break; 3629 3630 case SCTP_SOCKOPT_CONNECTX_OLD: 3631 /* 'optlen' is the size of the addresses buffer. */ 3632 retval = sctp_setsockopt_connectx_old(sk, 3633 (struct sockaddr __user *)optval, 3634 optlen); 3635 break; 3636 3637 case SCTP_SOCKOPT_CONNECTX: 3638 /* 'optlen' is the size of the addresses buffer. */ 3639 retval = sctp_setsockopt_connectx(sk, 3640 (struct sockaddr __user *)optval, 3641 optlen); 3642 break; 3643 3644 case SCTP_DISABLE_FRAGMENTS: 3645 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3646 break; 3647 3648 case SCTP_EVENTS: 3649 retval = sctp_setsockopt_events(sk, optval, optlen); 3650 break; 3651 3652 case SCTP_AUTOCLOSE: 3653 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3654 break; 3655 3656 case SCTP_PEER_ADDR_PARAMS: 3657 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3658 break; 3659 3660 case SCTP_DELAYED_SACK: 3661 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3662 break; 3663 case SCTP_PARTIAL_DELIVERY_POINT: 3664 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3665 break; 3666 3667 case SCTP_INITMSG: 3668 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3669 break; 3670 case SCTP_DEFAULT_SEND_PARAM: 3671 retval = sctp_setsockopt_default_send_param(sk, optval, 3672 optlen); 3673 break; 3674 case SCTP_PRIMARY_ADDR: 3675 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3676 break; 3677 case SCTP_SET_PEER_PRIMARY_ADDR: 3678 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3679 break; 3680 case SCTP_NODELAY: 3681 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3682 break; 3683 case SCTP_RTOINFO: 3684 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3685 break; 3686 case SCTP_ASSOCINFO: 3687 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3688 break; 3689 case SCTP_I_WANT_MAPPED_V4_ADDR: 3690 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3691 break; 3692 case SCTP_MAXSEG: 3693 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3694 break; 3695 case SCTP_ADAPTATION_LAYER: 3696 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3697 break; 3698 case SCTP_CONTEXT: 3699 retval = sctp_setsockopt_context(sk, optval, optlen); 3700 break; 3701 case SCTP_FRAGMENT_INTERLEAVE: 3702 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3703 break; 3704 case SCTP_MAX_BURST: 3705 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3706 break; 3707 case SCTP_AUTH_CHUNK: 3708 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3709 break; 3710 case SCTP_HMAC_IDENT: 3711 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3712 break; 3713 case SCTP_AUTH_KEY: 3714 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3715 break; 3716 case SCTP_AUTH_ACTIVE_KEY: 3717 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3718 break; 3719 case SCTP_AUTH_DELETE_KEY: 3720 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3721 break; 3722 case SCTP_AUTO_ASCONF: 3723 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3724 break; 3725 case SCTP_PEER_ADDR_THLDS: 3726 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3727 break; 3728 default: 3729 retval = -ENOPROTOOPT; 3730 break; 3731 } 3732 3733 release_sock(sk); 3734 3735 out_nounlock: 3736 return retval; 3737 } 3738 3739 /* API 3.1.6 connect() - UDP Style Syntax 3740 * 3741 * An application may use the connect() call in the UDP model to initiate an 3742 * association without sending data. 3743 * 3744 * The syntax is: 3745 * 3746 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3747 * 3748 * sd: the socket descriptor to have a new association added to. 3749 * 3750 * nam: the address structure (either struct sockaddr_in or struct 3751 * sockaddr_in6 defined in RFC2553 [7]). 3752 * 3753 * len: the size of the address. 3754 */ 3755 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3756 int addr_len) 3757 { 3758 int err = 0; 3759 struct sctp_af *af; 3760 3761 lock_sock(sk); 3762 3763 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3764 addr, addr_len); 3765 3766 /* Validate addr_len before calling common connect/connectx routine. */ 3767 af = sctp_get_af_specific(addr->sa_family); 3768 if (!af || addr_len < af->sockaddr_len) { 3769 err = -EINVAL; 3770 } else { 3771 /* Pass correct addr len to common routine (so it knows there 3772 * is only one address being passed. 3773 */ 3774 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3775 } 3776 3777 release_sock(sk); 3778 return err; 3779 } 3780 3781 /* FIXME: Write comments. */ 3782 static int sctp_disconnect(struct sock *sk, int flags) 3783 { 3784 return -EOPNOTSUPP; /* STUB */ 3785 } 3786 3787 /* 4.1.4 accept() - TCP Style Syntax 3788 * 3789 * Applications use accept() call to remove an established SCTP 3790 * association from the accept queue of the endpoint. A new socket 3791 * descriptor will be returned from accept() to represent the newly 3792 * formed association. 3793 */ 3794 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3795 { 3796 struct sctp_sock *sp; 3797 struct sctp_endpoint *ep; 3798 struct sock *newsk = NULL; 3799 struct sctp_association *asoc; 3800 long timeo; 3801 int error = 0; 3802 3803 lock_sock(sk); 3804 3805 sp = sctp_sk(sk); 3806 ep = sp->ep; 3807 3808 if (!sctp_style(sk, TCP)) { 3809 error = -EOPNOTSUPP; 3810 goto out; 3811 } 3812 3813 if (!sctp_sstate(sk, LISTENING)) { 3814 error = -EINVAL; 3815 goto out; 3816 } 3817 3818 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3819 3820 error = sctp_wait_for_accept(sk, timeo); 3821 if (error) 3822 goto out; 3823 3824 /* We treat the list of associations on the endpoint as the accept 3825 * queue and pick the first association on the list. 3826 */ 3827 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3828 3829 newsk = sp->pf->create_accept_sk(sk, asoc); 3830 if (!newsk) { 3831 error = -ENOMEM; 3832 goto out; 3833 } 3834 3835 /* Populate the fields of the newsk from the oldsk and migrate the 3836 * asoc to the newsk. 3837 */ 3838 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3839 3840 out: 3841 release_sock(sk); 3842 *err = error; 3843 return newsk; 3844 } 3845 3846 /* The SCTP ioctl handler. */ 3847 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3848 { 3849 int rc = -ENOTCONN; 3850 3851 lock_sock(sk); 3852 3853 /* 3854 * SEQPACKET-style sockets in LISTENING state are valid, for 3855 * SCTP, so only discard TCP-style sockets in LISTENING state. 3856 */ 3857 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3858 goto out; 3859 3860 switch (cmd) { 3861 case SIOCINQ: { 3862 struct sk_buff *skb; 3863 unsigned int amount = 0; 3864 3865 skb = skb_peek(&sk->sk_receive_queue); 3866 if (skb != NULL) { 3867 /* 3868 * We will only return the amount of this packet since 3869 * that is all that will be read. 3870 */ 3871 amount = skb->len; 3872 } 3873 rc = put_user(amount, (int __user *)arg); 3874 break; 3875 } 3876 default: 3877 rc = -ENOIOCTLCMD; 3878 break; 3879 } 3880 out: 3881 release_sock(sk); 3882 return rc; 3883 } 3884 3885 /* This is the function which gets called during socket creation to 3886 * initialized the SCTP-specific portion of the sock. 3887 * The sock structure should already be zero-filled memory. 3888 */ 3889 static int sctp_init_sock(struct sock *sk) 3890 { 3891 struct net *net = sock_net(sk); 3892 struct sctp_sock *sp; 3893 3894 pr_debug("%s: sk:%p\n", __func__, sk); 3895 3896 sp = sctp_sk(sk); 3897 3898 /* Initialize the SCTP per socket area. */ 3899 switch (sk->sk_type) { 3900 case SOCK_SEQPACKET: 3901 sp->type = SCTP_SOCKET_UDP; 3902 break; 3903 case SOCK_STREAM: 3904 sp->type = SCTP_SOCKET_TCP; 3905 break; 3906 default: 3907 return -ESOCKTNOSUPPORT; 3908 } 3909 3910 /* Initialize default send parameters. These parameters can be 3911 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 3912 */ 3913 sp->default_stream = 0; 3914 sp->default_ppid = 0; 3915 sp->default_flags = 0; 3916 sp->default_context = 0; 3917 sp->default_timetolive = 0; 3918 3919 sp->default_rcv_context = 0; 3920 sp->max_burst = net->sctp.max_burst; 3921 3922 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 3923 3924 /* Initialize default setup parameters. These parameters 3925 * can be modified with the SCTP_INITMSG socket option or 3926 * overridden by the SCTP_INIT CMSG. 3927 */ 3928 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3929 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3930 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 3931 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 3932 3933 /* Initialize default RTO related parameters. These parameters can 3934 * be modified for with the SCTP_RTOINFO socket option. 3935 */ 3936 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 3937 sp->rtoinfo.srto_max = net->sctp.rto_max; 3938 sp->rtoinfo.srto_min = net->sctp.rto_min; 3939 3940 /* Initialize default association related parameters. These parameters 3941 * can be modified with the SCTP_ASSOCINFO socket option. 3942 */ 3943 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 3944 sp->assocparams.sasoc_number_peer_destinations = 0; 3945 sp->assocparams.sasoc_peer_rwnd = 0; 3946 sp->assocparams.sasoc_local_rwnd = 0; 3947 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 3948 3949 /* Initialize default event subscriptions. By default, all the 3950 * options are off. 3951 */ 3952 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 3953 3954 /* Default Peer Address Parameters. These defaults can 3955 * be modified via SCTP_PEER_ADDR_PARAMS 3956 */ 3957 sp->hbinterval = net->sctp.hb_interval; 3958 sp->pathmaxrxt = net->sctp.max_retrans_path; 3959 sp->pathmtu = 0; /* allow default discovery */ 3960 sp->sackdelay = net->sctp.sack_timeout; 3961 sp->sackfreq = 2; 3962 sp->param_flags = SPP_HB_ENABLE | 3963 SPP_PMTUD_ENABLE | 3964 SPP_SACKDELAY_ENABLE; 3965 3966 /* If enabled no SCTP message fragmentation will be performed. 3967 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 3968 */ 3969 sp->disable_fragments = 0; 3970 3971 /* Enable Nagle algorithm by default. */ 3972 sp->nodelay = 0; 3973 3974 /* Enable by default. */ 3975 sp->v4mapped = 1; 3976 3977 /* Auto-close idle associations after the configured 3978 * number of seconds. A value of 0 disables this 3979 * feature. Configure through the SCTP_AUTOCLOSE socket option, 3980 * for UDP-style sockets only. 3981 */ 3982 sp->autoclose = 0; 3983 3984 /* User specified fragmentation limit. */ 3985 sp->user_frag = 0; 3986 3987 sp->adaptation_ind = 0; 3988 3989 sp->pf = sctp_get_pf_specific(sk->sk_family); 3990 3991 /* Control variables for partial data delivery. */ 3992 atomic_set(&sp->pd_mode, 0); 3993 skb_queue_head_init(&sp->pd_lobby); 3994 sp->frag_interleave = 0; 3995 3996 /* Create a per socket endpoint structure. Even if we 3997 * change the data structure relationships, this may still 3998 * be useful for storing pre-connect address information. 3999 */ 4000 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4001 if (!sp->ep) 4002 return -ENOMEM; 4003 4004 sp->hmac = NULL; 4005 4006 sk->sk_destruct = sctp_destruct_sock; 4007 4008 SCTP_DBG_OBJCNT_INC(sock); 4009 4010 local_bh_disable(); 4011 percpu_counter_inc(&sctp_sockets_allocated); 4012 sock_prot_inuse_add(net, sk->sk_prot, 1); 4013 if (net->sctp.default_auto_asconf) { 4014 list_add_tail(&sp->auto_asconf_list, 4015 &net->sctp.auto_asconf_splist); 4016 sp->do_auto_asconf = 1; 4017 } else 4018 sp->do_auto_asconf = 0; 4019 local_bh_enable(); 4020 4021 return 0; 4022 } 4023 4024 /* Cleanup any SCTP per socket resources. */ 4025 static void sctp_destroy_sock(struct sock *sk) 4026 { 4027 struct sctp_sock *sp; 4028 4029 pr_debug("%s: sk:%p\n", __func__, sk); 4030 4031 /* Release our hold on the endpoint. */ 4032 sp = sctp_sk(sk); 4033 /* This could happen during socket init, thus we bail out 4034 * early, since the rest of the below is not setup either. 4035 */ 4036 if (sp->ep == NULL) 4037 return; 4038 4039 if (sp->do_auto_asconf) { 4040 sp->do_auto_asconf = 0; 4041 list_del(&sp->auto_asconf_list); 4042 } 4043 sctp_endpoint_free(sp->ep); 4044 local_bh_disable(); 4045 percpu_counter_dec(&sctp_sockets_allocated); 4046 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4047 local_bh_enable(); 4048 } 4049 4050 /* Triggered when there are no references on the socket anymore */ 4051 static void sctp_destruct_sock(struct sock *sk) 4052 { 4053 struct sctp_sock *sp = sctp_sk(sk); 4054 4055 /* Free up the HMAC transform. */ 4056 crypto_free_hash(sp->hmac); 4057 4058 inet_sock_destruct(sk); 4059 } 4060 4061 /* API 4.1.7 shutdown() - TCP Style Syntax 4062 * int shutdown(int socket, int how); 4063 * 4064 * sd - the socket descriptor of the association to be closed. 4065 * how - Specifies the type of shutdown. The values are 4066 * as follows: 4067 * SHUT_RD 4068 * Disables further receive operations. No SCTP 4069 * protocol action is taken. 4070 * SHUT_WR 4071 * Disables further send operations, and initiates 4072 * the SCTP shutdown sequence. 4073 * SHUT_RDWR 4074 * Disables further send and receive operations 4075 * and initiates the SCTP shutdown sequence. 4076 */ 4077 static void sctp_shutdown(struct sock *sk, int how) 4078 { 4079 struct net *net = sock_net(sk); 4080 struct sctp_endpoint *ep; 4081 struct sctp_association *asoc; 4082 4083 if (!sctp_style(sk, TCP)) 4084 return; 4085 4086 if (how & SEND_SHUTDOWN) { 4087 ep = sctp_sk(sk)->ep; 4088 if (!list_empty(&ep->asocs)) { 4089 asoc = list_entry(ep->asocs.next, 4090 struct sctp_association, asocs); 4091 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4092 } 4093 } 4094 } 4095 4096 /* 7.2.1 Association Status (SCTP_STATUS) 4097 4098 * Applications can retrieve current status information about an 4099 * association, including association state, peer receiver window size, 4100 * number of unacked data chunks, and number of data chunks pending 4101 * receipt. This information is read-only. 4102 */ 4103 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4104 char __user *optval, 4105 int __user *optlen) 4106 { 4107 struct sctp_status status; 4108 struct sctp_association *asoc = NULL; 4109 struct sctp_transport *transport; 4110 sctp_assoc_t associd; 4111 int retval = 0; 4112 4113 if (len < sizeof(status)) { 4114 retval = -EINVAL; 4115 goto out; 4116 } 4117 4118 len = sizeof(status); 4119 if (copy_from_user(&status, optval, len)) { 4120 retval = -EFAULT; 4121 goto out; 4122 } 4123 4124 associd = status.sstat_assoc_id; 4125 asoc = sctp_id2assoc(sk, associd); 4126 if (!asoc) { 4127 retval = -EINVAL; 4128 goto out; 4129 } 4130 4131 transport = asoc->peer.primary_path; 4132 4133 status.sstat_assoc_id = sctp_assoc2id(asoc); 4134 status.sstat_state = asoc->state; 4135 status.sstat_rwnd = asoc->peer.rwnd; 4136 status.sstat_unackdata = asoc->unack_data; 4137 4138 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4139 status.sstat_instrms = asoc->c.sinit_max_instreams; 4140 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4141 status.sstat_fragmentation_point = asoc->frag_point; 4142 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4143 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4144 transport->af_specific->sockaddr_len); 4145 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4146 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4147 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4148 status.sstat_primary.spinfo_state = transport->state; 4149 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4150 status.sstat_primary.spinfo_srtt = transport->srtt; 4151 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4152 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4153 4154 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4155 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4156 4157 if (put_user(len, optlen)) { 4158 retval = -EFAULT; 4159 goto out; 4160 } 4161 4162 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4163 __func__, len, status.sstat_state, status.sstat_rwnd, 4164 status.sstat_assoc_id); 4165 4166 if (copy_to_user(optval, &status, len)) { 4167 retval = -EFAULT; 4168 goto out; 4169 } 4170 4171 out: 4172 return retval; 4173 } 4174 4175 4176 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4177 * 4178 * Applications can retrieve information about a specific peer address 4179 * of an association, including its reachability state, congestion 4180 * window, and retransmission timer values. This information is 4181 * read-only. 4182 */ 4183 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4184 char __user *optval, 4185 int __user *optlen) 4186 { 4187 struct sctp_paddrinfo pinfo; 4188 struct sctp_transport *transport; 4189 int retval = 0; 4190 4191 if (len < sizeof(pinfo)) { 4192 retval = -EINVAL; 4193 goto out; 4194 } 4195 4196 len = sizeof(pinfo); 4197 if (copy_from_user(&pinfo, optval, len)) { 4198 retval = -EFAULT; 4199 goto out; 4200 } 4201 4202 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4203 pinfo.spinfo_assoc_id); 4204 if (!transport) 4205 return -EINVAL; 4206 4207 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4208 pinfo.spinfo_state = transport->state; 4209 pinfo.spinfo_cwnd = transport->cwnd; 4210 pinfo.spinfo_srtt = transport->srtt; 4211 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4212 pinfo.spinfo_mtu = transport->pathmtu; 4213 4214 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4215 pinfo.spinfo_state = SCTP_ACTIVE; 4216 4217 if (put_user(len, optlen)) { 4218 retval = -EFAULT; 4219 goto out; 4220 } 4221 4222 if (copy_to_user(optval, &pinfo, len)) { 4223 retval = -EFAULT; 4224 goto out; 4225 } 4226 4227 out: 4228 return retval; 4229 } 4230 4231 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4232 * 4233 * This option is a on/off flag. If enabled no SCTP message 4234 * fragmentation will be performed. Instead if a message being sent 4235 * exceeds the current PMTU size, the message will NOT be sent and 4236 * instead a error will be indicated to the user. 4237 */ 4238 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4239 char __user *optval, int __user *optlen) 4240 { 4241 int val; 4242 4243 if (len < sizeof(int)) 4244 return -EINVAL; 4245 4246 len = sizeof(int); 4247 val = (sctp_sk(sk)->disable_fragments == 1); 4248 if (put_user(len, optlen)) 4249 return -EFAULT; 4250 if (copy_to_user(optval, &val, len)) 4251 return -EFAULT; 4252 return 0; 4253 } 4254 4255 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4256 * 4257 * This socket option is used to specify various notifications and 4258 * ancillary data the user wishes to receive. 4259 */ 4260 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4261 int __user *optlen) 4262 { 4263 if (len <= 0) 4264 return -EINVAL; 4265 if (len > sizeof(struct sctp_event_subscribe)) 4266 len = sizeof(struct sctp_event_subscribe); 4267 if (put_user(len, optlen)) 4268 return -EFAULT; 4269 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4270 return -EFAULT; 4271 return 0; 4272 } 4273 4274 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4275 * 4276 * This socket option is applicable to the UDP-style socket only. When 4277 * set it will cause associations that are idle for more than the 4278 * specified number of seconds to automatically close. An association 4279 * being idle is defined an association that has NOT sent or received 4280 * user data. The special value of '0' indicates that no automatic 4281 * close of any associations should be performed. The option expects an 4282 * integer defining the number of seconds of idle time before an 4283 * association is closed. 4284 */ 4285 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4286 { 4287 /* Applicable to UDP-style socket only */ 4288 if (sctp_style(sk, TCP)) 4289 return -EOPNOTSUPP; 4290 if (len < sizeof(int)) 4291 return -EINVAL; 4292 len = sizeof(int); 4293 if (put_user(len, optlen)) 4294 return -EFAULT; 4295 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4296 return -EFAULT; 4297 return 0; 4298 } 4299 4300 /* Helper routine to branch off an association to a new socket. */ 4301 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4302 { 4303 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4304 struct socket *sock; 4305 struct sctp_af *af; 4306 int err = 0; 4307 4308 if (!asoc) 4309 return -EINVAL; 4310 4311 /* An association cannot be branched off from an already peeled-off 4312 * socket, nor is this supported for tcp style sockets. 4313 */ 4314 if (!sctp_style(sk, UDP)) 4315 return -EINVAL; 4316 4317 /* Create a new socket. */ 4318 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4319 if (err < 0) 4320 return err; 4321 4322 sctp_copy_sock(sock->sk, sk, asoc); 4323 4324 /* Make peeled-off sockets more like 1-1 accepted sockets. 4325 * Set the daddr and initialize id to something more random 4326 */ 4327 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); 4328 af->to_sk_daddr(&asoc->peer.primary_addr, sk); 4329 4330 /* Populate the fields of the newsk from the oldsk and migrate the 4331 * asoc to the newsk. 4332 */ 4333 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4334 4335 *sockp = sock; 4336 4337 return err; 4338 } 4339 EXPORT_SYMBOL(sctp_do_peeloff); 4340 4341 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4342 { 4343 sctp_peeloff_arg_t peeloff; 4344 struct socket *newsock; 4345 struct file *newfile; 4346 int retval = 0; 4347 4348 if (len < sizeof(sctp_peeloff_arg_t)) 4349 return -EINVAL; 4350 len = sizeof(sctp_peeloff_arg_t); 4351 if (copy_from_user(&peeloff, optval, len)) 4352 return -EFAULT; 4353 4354 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4355 if (retval < 0) 4356 goto out; 4357 4358 /* Map the socket to an unused fd that can be returned to the user. */ 4359 retval = get_unused_fd_flags(0); 4360 if (retval < 0) { 4361 sock_release(newsock); 4362 goto out; 4363 } 4364 4365 newfile = sock_alloc_file(newsock, 0, NULL); 4366 if (unlikely(IS_ERR(newfile))) { 4367 put_unused_fd(retval); 4368 sock_release(newsock); 4369 return PTR_ERR(newfile); 4370 } 4371 4372 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4373 retval); 4374 4375 /* Return the fd mapped to the new socket. */ 4376 if (put_user(len, optlen)) { 4377 fput(newfile); 4378 put_unused_fd(retval); 4379 return -EFAULT; 4380 } 4381 peeloff.sd = retval; 4382 if (copy_to_user(optval, &peeloff, len)) { 4383 fput(newfile); 4384 put_unused_fd(retval); 4385 return -EFAULT; 4386 } 4387 fd_install(retval, newfile); 4388 out: 4389 return retval; 4390 } 4391 4392 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4393 * 4394 * Applications can enable or disable heartbeats for any peer address of 4395 * an association, modify an address's heartbeat interval, force a 4396 * heartbeat to be sent immediately, and adjust the address's maximum 4397 * number of retransmissions sent before an address is considered 4398 * unreachable. The following structure is used to access and modify an 4399 * address's parameters: 4400 * 4401 * struct sctp_paddrparams { 4402 * sctp_assoc_t spp_assoc_id; 4403 * struct sockaddr_storage spp_address; 4404 * uint32_t spp_hbinterval; 4405 * uint16_t spp_pathmaxrxt; 4406 * uint32_t spp_pathmtu; 4407 * uint32_t spp_sackdelay; 4408 * uint32_t spp_flags; 4409 * }; 4410 * 4411 * spp_assoc_id - (one-to-many style socket) This is filled in the 4412 * application, and identifies the association for 4413 * this query. 4414 * spp_address - This specifies which address is of interest. 4415 * spp_hbinterval - This contains the value of the heartbeat interval, 4416 * in milliseconds. If a value of zero 4417 * is present in this field then no changes are to 4418 * be made to this parameter. 4419 * spp_pathmaxrxt - This contains the maximum number of 4420 * retransmissions before this address shall be 4421 * considered unreachable. If a value of zero 4422 * is present in this field then no changes are to 4423 * be made to this parameter. 4424 * spp_pathmtu - When Path MTU discovery is disabled the value 4425 * specified here will be the "fixed" path mtu. 4426 * Note that if the spp_address field is empty 4427 * then all associations on this address will 4428 * have this fixed path mtu set upon them. 4429 * 4430 * spp_sackdelay - When delayed sack is enabled, this value specifies 4431 * the number of milliseconds that sacks will be delayed 4432 * for. This value will apply to all addresses of an 4433 * association if the spp_address field is empty. Note 4434 * also, that if delayed sack is enabled and this 4435 * value is set to 0, no change is made to the last 4436 * recorded delayed sack timer value. 4437 * 4438 * spp_flags - These flags are used to control various features 4439 * on an association. The flag field may contain 4440 * zero or more of the following options. 4441 * 4442 * SPP_HB_ENABLE - Enable heartbeats on the 4443 * specified address. Note that if the address 4444 * field is empty all addresses for the association 4445 * have heartbeats enabled upon them. 4446 * 4447 * SPP_HB_DISABLE - Disable heartbeats on the 4448 * speicifed address. Note that if the address 4449 * field is empty all addresses for the association 4450 * will have their heartbeats disabled. Note also 4451 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4452 * mutually exclusive, only one of these two should 4453 * be specified. Enabling both fields will have 4454 * undetermined results. 4455 * 4456 * SPP_HB_DEMAND - Request a user initiated heartbeat 4457 * to be made immediately. 4458 * 4459 * SPP_PMTUD_ENABLE - This field will enable PMTU 4460 * discovery upon the specified address. Note that 4461 * if the address feild is empty then all addresses 4462 * on the association are effected. 4463 * 4464 * SPP_PMTUD_DISABLE - This field will disable PMTU 4465 * discovery upon the specified address. Note that 4466 * if the address feild is empty then all addresses 4467 * on the association are effected. Not also that 4468 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4469 * exclusive. Enabling both will have undetermined 4470 * results. 4471 * 4472 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4473 * on delayed sack. The time specified in spp_sackdelay 4474 * is used to specify the sack delay for this address. Note 4475 * that if spp_address is empty then all addresses will 4476 * enable delayed sack and take on the sack delay 4477 * value specified in spp_sackdelay. 4478 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4479 * off delayed sack. If the spp_address field is blank then 4480 * delayed sack is disabled for the entire association. Note 4481 * also that this field is mutually exclusive to 4482 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4483 * results. 4484 */ 4485 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4486 char __user *optval, int __user *optlen) 4487 { 4488 struct sctp_paddrparams params; 4489 struct sctp_transport *trans = NULL; 4490 struct sctp_association *asoc = NULL; 4491 struct sctp_sock *sp = sctp_sk(sk); 4492 4493 if (len < sizeof(struct sctp_paddrparams)) 4494 return -EINVAL; 4495 len = sizeof(struct sctp_paddrparams); 4496 if (copy_from_user(¶ms, optval, len)) 4497 return -EFAULT; 4498 4499 /* If an address other than INADDR_ANY is specified, and 4500 * no transport is found, then the request is invalid. 4501 */ 4502 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 4503 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 4504 params.spp_assoc_id); 4505 if (!trans) { 4506 pr_debug("%s: failed no transport\n", __func__); 4507 return -EINVAL; 4508 } 4509 } 4510 4511 /* Get association, if assoc_id != 0 and the socket is a one 4512 * to many style socket, and an association was not found, then 4513 * the id was invalid. 4514 */ 4515 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4516 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4517 pr_debug("%s: failed no association\n", __func__); 4518 return -EINVAL; 4519 } 4520 4521 if (trans) { 4522 /* Fetch transport values. */ 4523 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4524 params.spp_pathmtu = trans->pathmtu; 4525 params.spp_pathmaxrxt = trans->pathmaxrxt; 4526 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4527 4528 /*draft-11 doesn't say what to return in spp_flags*/ 4529 params.spp_flags = trans->param_flags; 4530 } else if (asoc) { 4531 /* Fetch association values. */ 4532 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4533 params.spp_pathmtu = asoc->pathmtu; 4534 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4535 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4536 4537 /*draft-11 doesn't say what to return in spp_flags*/ 4538 params.spp_flags = asoc->param_flags; 4539 } else { 4540 /* Fetch socket values. */ 4541 params.spp_hbinterval = sp->hbinterval; 4542 params.spp_pathmtu = sp->pathmtu; 4543 params.spp_sackdelay = sp->sackdelay; 4544 params.spp_pathmaxrxt = sp->pathmaxrxt; 4545 4546 /*draft-11 doesn't say what to return in spp_flags*/ 4547 params.spp_flags = sp->param_flags; 4548 } 4549 4550 if (copy_to_user(optval, ¶ms, len)) 4551 return -EFAULT; 4552 4553 if (put_user(len, optlen)) 4554 return -EFAULT; 4555 4556 return 0; 4557 } 4558 4559 /* 4560 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4561 * 4562 * This option will effect the way delayed acks are performed. This 4563 * option allows you to get or set the delayed ack time, in 4564 * milliseconds. It also allows changing the delayed ack frequency. 4565 * Changing the frequency to 1 disables the delayed sack algorithm. If 4566 * the assoc_id is 0, then this sets or gets the endpoints default 4567 * values. If the assoc_id field is non-zero, then the set or get 4568 * effects the specified association for the one to many model (the 4569 * assoc_id field is ignored by the one to one model). Note that if 4570 * sack_delay or sack_freq are 0 when setting this option, then the 4571 * current values will remain unchanged. 4572 * 4573 * struct sctp_sack_info { 4574 * sctp_assoc_t sack_assoc_id; 4575 * uint32_t sack_delay; 4576 * uint32_t sack_freq; 4577 * }; 4578 * 4579 * sack_assoc_id - This parameter, indicates which association the user 4580 * is performing an action upon. Note that if this field's value is 4581 * zero then the endpoints default value is changed (effecting future 4582 * associations only). 4583 * 4584 * sack_delay - This parameter contains the number of milliseconds that 4585 * the user is requesting the delayed ACK timer be set to. Note that 4586 * this value is defined in the standard to be between 200 and 500 4587 * milliseconds. 4588 * 4589 * sack_freq - This parameter contains the number of packets that must 4590 * be received before a sack is sent without waiting for the delay 4591 * timer to expire. The default value for this is 2, setting this 4592 * value to 1 will disable the delayed sack algorithm. 4593 */ 4594 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4595 char __user *optval, 4596 int __user *optlen) 4597 { 4598 struct sctp_sack_info params; 4599 struct sctp_association *asoc = NULL; 4600 struct sctp_sock *sp = sctp_sk(sk); 4601 4602 if (len >= sizeof(struct sctp_sack_info)) { 4603 len = sizeof(struct sctp_sack_info); 4604 4605 if (copy_from_user(¶ms, optval, len)) 4606 return -EFAULT; 4607 } else if (len == sizeof(struct sctp_assoc_value)) { 4608 pr_warn_ratelimited(DEPRECATED 4609 "%s (pid %d) " 4610 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 4611 "Use struct sctp_sack_info instead\n", 4612 current->comm, task_pid_nr(current)); 4613 if (copy_from_user(¶ms, optval, len)) 4614 return -EFAULT; 4615 } else 4616 return -EINVAL; 4617 4618 /* Get association, if sack_assoc_id != 0 and the socket is a one 4619 * to many style socket, and an association was not found, then 4620 * the id was invalid. 4621 */ 4622 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4623 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4624 return -EINVAL; 4625 4626 if (asoc) { 4627 /* Fetch association values. */ 4628 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4629 params.sack_delay = jiffies_to_msecs( 4630 asoc->sackdelay); 4631 params.sack_freq = asoc->sackfreq; 4632 4633 } else { 4634 params.sack_delay = 0; 4635 params.sack_freq = 1; 4636 } 4637 } else { 4638 /* Fetch socket values. */ 4639 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4640 params.sack_delay = sp->sackdelay; 4641 params.sack_freq = sp->sackfreq; 4642 } else { 4643 params.sack_delay = 0; 4644 params.sack_freq = 1; 4645 } 4646 } 4647 4648 if (copy_to_user(optval, ¶ms, len)) 4649 return -EFAULT; 4650 4651 if (put_user(len, optlen)) 4652 return -EFAULT; 4653 4654 return 0; 4655 } 4656 4657 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4658 * 4659 * Applications can specify protocol parameters for the default association 4660 * initialization. The option name argument to setsockopt() and getsockopt() 4661 * is SCTP_INITMSG. 4662 * 4663 * Setting initialization parameters is effective only on an unconnected 4664 * socket (for UDP-style sockets only future associations are effected 4665 * by the change). With TCP-style sockets, this option is inherited by 4666 * sockets derived from a listener socket. 4667 */ 4668 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4669 { 4670 if (len < sizeof(struct sctp_initmsg)) 4671 return -EINVAL; 4672 len = sizeof(struct sctp_initmsg); 4673 if (put_user(len, optlen)) 4674 return -EFAULT; 4675 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4676 return -EFAULT; 4677 return 0; 4678 } 4679 4680 4681 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4682 char __user *optval, int __user *optlen) 4683 { 4684 struct sctp_association *asoc; 4685 int cnt = 0; 4686 struct sctp_getaddrs getaddrs; 4687 struct sctp_transport *from; 4688 void __user *to; 4689 union sctp_addr temp; 4690 struct sctp_sock *sp = sctp_sk(sk); 4691 int addrlen; 4692 size_t space_left; 4693 int bytes_copied; 4694 4695 if (len < sizeof(struct sctp_getaddrs)) 4696 return -EINVAL; 4697 4698 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4699 return -EFAULT; 4700 4701 /* For UDP-style sockets, id specifies the association to query. */ 4702 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4703 if (!asoc) 4704 return -EINVAL; 4705 4706 to = optval + offsetof(struct sctp_getaddrs, addrs); 4707 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4708 4709 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4710 transports) { 4711 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4712 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4713 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4714 if (space_left < addrlen) 4715 return -ENOMEM; 4716 if (copy_to_user(to, &temp, addrlen)) 4717 return -EFAULT; 4718 to += addrlen; 4719 cnt++; 4720 space_left -= addrlen; 4721 } 4722 4723 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4724 return -EFAULT; 4725 bytes_copied = ((char __user *)to) - optval; 4726 if (put_user(bytes_copied, optlen)) 4727 return -EFAULT; 4728 4729 return 0; 4730 } 4731 4732 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4733 size_t space_left, int *bytes_copied) 4734 { 4735 struct sctp_sockaddr_entry *addr; 4736 union sctp_addr temp; 4737 int cnt = 0; 4738 int addrlen; 4739 struct net *net = sock_net(sk); 4740 4741 rcu_read_lock(); 4742 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4743 if (!addr->valid) 4744 continue; 4745 4746 if ((PF_INET == sk->sk_family) && 4747 (AF_INET6 == addr->a.sa.sa_family)) 4748 continue; 4749 if ((PF_INET6 == sk->sk_family) && 4750 inet_v6_ipv6only(sk) && 4751 (AF_INET == addr->a.sa.sa_family)) 4752 continue; 4753 memcpy(&temp, &addr->a, sizeof(temp)); 4754 if (!temp.v4.sin_port) 4755 temp.v4.sin_port = htons(port); 4756 4757 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4758 &temp); 4759 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4760 if (space_left < addrlen) { 4761 cnt = -ENOMEM; 4762 break; 4763 } 4764 memcpy(to, &temp, addrlen); 4765 4766 to += addrlen; 4767 cnt++; 4768 space_left -= addrlen; 4769 *bytes_copied += addrlen; 4770 } 4771 rcu_read_unlock(); 4772 4773 return cnt; 4774 } 4775 4776 4777 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4778 char __user *optval, int __user *optlen) 4779 { 4780 struct sctp_bind_addr *bp; 4781 struct sctp_association *asoc; 4782 int cnt = 0; 4783 struct sctp_getaddrs getaddrs; 4784 struct sctp_sockaddr_entry *addr; 4785 void __user *to; 4786 union sctp_addr temp; 4787 struct sctp_sock *sp = sctp_sk(sk); 4788 int addrlen; 4789 int err = 0; 4790 size_t space_left; 4791 int bytes_copied = 0; 4792 void *addrs; 4793 void *buf; 4794 4795 if (len < sizeof(struct sctp_getaddrs)) 4796 return -EINVAL; 4797 4798 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4799 return -EFAULT; 4800 4801 /* 4802 * For UDP-style sockets, id specifies the association to query. 4803 * If the id field is set to the value '0' then the locally bound 4804 * addresses are returned without regard to any particular 4805 * association. 4806 */ 4807 if (0 == getaddrs.assoc_id) { 4808 bp = &sctp_sk(sk)->ep->base.bind_addr; 4809 } else { 4810 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4811 if (!asoc) 4812 return -EINVAL; 4813 bp = &asoc->base.bind_addr; 4814 } 4815 4816 to = optval + offsetof(struct sctp_getaddrs, addrs); 4817 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4818 4819 addrs = kmalloc(space_left, GFP_KERNEL); 4820 if (!addrs) 4821 return -ENOMEM; 4822 4823 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4824 * addresses from the global local address list. 4825 */ 4826 if (sctp_list_single_entry(&bp->address_list)) { 4827 addr = list_entry(bp->address_list.next, 4828 struct sctp_sockaddr_entry, list); 4829 if (sctp_is_any(sk, &addr->a)) { 4830 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4831 space_left, &bytes_copied); 4832 if (cnt < 0) { 4833 err = cnt; 4834 goto out; 4835 } 4836 goto copy_getaddrs; 4837 } 4838 } 4839 4840 buf = addrs; 4841 /* Protection on the bound address list is not needed since 4842 * in the socket option context we hold a socket lock and 4843 * thus the bound address list can't change. 4844 */ 4845 list_for_each_entry(addr, &bp->address_list, list) { 4846 memcpy(&temp, &addr->a, sizeof(temp)); 4847 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4848 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4849 if (space_left < addrlen) { 4850 err = -ENOMEM; /*fixme: right error?*/ 4851 goto out; 4852 } 4853 memcpy(buf, &temp, addrlen); 4854 buf += addrlen; 4855 bytes_copied += addrlen; 4856 cnt++; 4857 space_left -= addrlen; 4858 } 4859 4860 copy_getaddrs: 4861 if (copy_to_user(to, addrs, bytes_copied)) { 4862 err = -EFAULT; 4863 goto out; 4864 } 4865 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4866 err = -EFAULT; 4867 goto out; 4868 } 4869 if (put_user(bytes_copied, optlen)) 4870 err = -EFAULT; 4871 out: 4872 kfree(addrs); 4873 return err; 4874 } 4875 4876 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4877 * 4878 * Requests that the local SCTP stack use the enclosed peer address as 4879 * the association primary. The enclosed address must be one of the 4880 * association peer's addresses. 4881 */ 4882 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4883 char __user *optval, int __user *optlen) 4884 { 4885 struct sctp_prim prim; 4886 struct sctp_association *asoc; 4887 struct sctp_sock *sp = sctp_sk(sk); 4888 4889 if (len < sizeof(struct sctp_prim)) 4890 return -EINVAL; 4891 4892 len = sizeof(struct sctp_prim); 4893 4894 if (copy_from_user(&prim, optval, len)) 4895 return -EFAULT; 4896 4897 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4898 if (!asoc) 4899 return -EINVAL; 4900 4901 if (!asoc->peer.primary_path) 4902 return -ENOTCONN; 4903 4904 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 4905 asoc->peer.primary_path->af_specific->sockaddr_len); 4906 4907 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4908 (union sctp_addr *)&prim.ssp_addr); 4909 4910 if (put_user(len, optlen)) 4911 return -EFAULT; 4912 if (copy_to_user(optval, &prim, len)) 4913 return -EFAULT; 4914 4915 return 0; 4916 } 4917 4918 /* 4919 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 4920 * 4921 * Requests that the local endpoint set the specified Adaptation Layer 4922 * Indication parameter for all future INIT and INIT-ACK exchanges. 4923 */ 4924 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 4925 char __user *optval, int __user *optlen) 4926 { 4927 struct sctp_setadaptation adaptation; 4928 4929 if (len < sizeof(struct sctp_setadaptation)) 4930 return -EINVAL; 4931 4932 len = sizeof(struct sctp_setadaptation); 4933 4934 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4935 4936 if (put_user(len, optlen)) 4937 return -EFAULT; 4938 if (copy_to_user(optval, &adaptation, len)) 4939 return -EFAULT; 4940 4941 return 0; 4942 } 4943 4944 /* 4945 * 4946 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 4947 * 4948 * Applications that wish to use the sendto() system call may wish to 4949 * specify a default set of parameters that would normally be supplied 4950 * through the inclusion of ancillary data. This socket option allows 4951 * such an application to set the default sctp_sndrcvinfo structure. 4952 4953 4954 * The application that wishes to use this socket option simply passes 4955 * in to this call the sctp_sndrcvinfo structure defined in Section 4956 * 5.2.2) The input parameters accepted by this call include 4957 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 4958 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 4959 * to this call if the caller is using the UDP model. 4960 * 4961 * For getsockopt, it get the default sctp_sndrcvinfo structure. 4962 */ 4963 static int sctp_getsockopt_default_send_param(struct sock *sk, 4964 int len, char __user *optval, 4965 int __user *optlen) 4966 { 4967 struct sctp_sndrcvinfo info; 4968 struct sctp_association *asoc; 4969 struct sctp_sock *sp = sctp_sk(sk); 4970 4971 if (len < sizeof(struct sctp_sndrcvinfo)) 4972 return -EINVAL; 4973 4974 len = sizeof(struct sctp_sndrcvinfo); 4975 4976 if (copy_from_user(&info, optval, len)) 4977 return -EFAULT; 4978 4979 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4980 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 4981 return -EINVAL; 4982 4983 if (asoc) { 4984 info.sinfo_stream = asoc->default_stream; 4985 info.sinfo_flags = asoc->default_flags; 4986 info.sinfo_ppid = asoc->default_ppid; 4987 info.sinfo_context = asoc->default_context; 4988 info.sinfo_timetolive = asoc->default_timetolive; 4989 } else { 4990 info.sinfo_stream = sp->default_stream; 4991 info.sinfo_flags = sp->default_flags; 4992 info.sinfo_ppid = sp->default_ppid; 4993 info.sinfo_context = sp->default_context; 4994 info.sinfo_timetolive = sp->default_timetolive; 4995 } 4996 4997 if (put_user(len, optlen)) 4998 return -EFAULT; 4999 if (copy_to_user(optval, &info, len)) 5000 return -EFAULT; 5001 5002 return 0; 5003 } 5004 5005 /* 5006 * 5007 * 7.1.5 SCTP_NODELAY 5008 * 5009 * Turn on/off any Nagle-like algorithm. This means that packets are 5010 * generally sent as soon as possible and no unnecessary delays are 5011 * introduced, at the cost of more packets in the network. Expects an 5012 * integer boolean flag. 5013 */ 5014 5015 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5016 char __user *optval, int __user *optlen) 5017 { 5018 int val; 5019 5020 if (len < sizeof(int)) 5021 return -EINVAL; 5022 5023 len = sizeof(int); 5024 val = (sctp_sk(sk)->nodelay == 1); 5025 if (put_user(len, optlen)) 5026 return -EFAULT; 5027 if (copy_to_user(optval, &val, len)) 5028 return -EFAULT; 5029 return 0; 5030 } 5031 5032 /* 5033 * 5034 * 7.1.1 SCTP_RTOINFO 5035 * 5036 * The protocol parameters used to initialize and bound retransmission 5037 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5038 * and modify these parameters. 5039 * All parameters are time values, in milliseconds. A value of 0, when 5040 * modifying the parameters, indicates that the current value should not 5041 * be changed. 5042 * 5043 */ 5044 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5045 char __user *optval, 5046 int __user *optlen) { 5047 struct sctp_rtoinfo rtoinfo; 5048 struct sctp_association *asoc; 5049 5050 if (len < sizeof (struct sctp_rtoinfo)) 5051 return -EINVAL; 5052 5053 len = sizeof(struct sctp_rtoinfo); 5054 5055 if (copy_from_user(&rtoinfo, optval, len)) 5056 return -EFAULT; 5057 5058 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5059 5060 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5061 return -EINVAL; 5062 5063 /* Values corresponding to the specific association. */ 5064 if (asoc) { 5065 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5066 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5067 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5068 } else { 5069 /* Values corresponding to the endpoint. */ 5070 struct sctp_sock *sp = sctp_sk(sk); 5071 5072 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5073 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5074 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5075 } 5076 5077 if (put_user(len, optlen)) 5078 return -EFAULT; 5079 5080 if (copy_to_user(optval, &rtoinfo, len)) 5081 return -EFAULT; 5082 5083 return 0; 5084 } 5085 5086 /* 5087 * 5088 * 7.1.2 SCTP_ASSOCINFO 5089 * 5090 * This option is used to tune the maximum retransmission attempts 5091 * of the association. 5092 * Returns an error if the new association retransmission value is 5093 * greater than the sum of the retransmission value of the peer. 5094 * See [SCTP] for more information. 5095 * 5096 */ 5097 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5098 char __user *optval, 5099 int __user *optlen) 5100 { 5101 5102 struct sctp_assocparams assocparams; 5103 struct sctp_association *asoc; 5104 struct list_head *pos; 5105 int cnt = 0; 5106 5107 if (len < sizeof (struct sctp_assocparams)) 5108 return -EINVAL; 5109 5110 len = sizeof(struct sctp_assocparams); 5111 5112 if (copy_from_user(&assocparams, optval, len)) 5113 return -EFAULT; 5114 5115 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5116 5117 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5118 return -EINVAL; 5119 5120 /* Values correspoinding to the specific association */ 5121 if (asoc) { 5122 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5123 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5124 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5125 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5126 5127 list_for_each(pos, &asoc->peer.transport_addr_list) { 5128 cnt++; 5129 } 5130 5131 assocparams.sasoc_number_peer_destinations = cnt; 5132 } else { 5133 /* Values corresponding to the endpoint */ 5134 struct sctp_sock *sp = sctp_sk(sk); 5135 5136 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5137 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5138 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5139 assocparams.sasoc_cookie_life = 5140 sp->assocparams.sasoc_cookie_life; 5141 assocparams.sasoc_number_peer_destinations = 5142 sp->assocparams. 5143 sasoc_number_peer_destinations; 5144 } 5145 5146 if (put_user(len, optlen)) 5147 return -EFAULT; 5148 5149 if (copy_to_user(optval, &assocparams, len)) 5150 return -EFAULT; 5151 5152 return 0; 5153 } 5154 5155 /* 5156 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5157 * 5158 * This socket option is a boolean flag which turns on or off mapped V4 5159 * addresses. If this option is turned on and the socket is type 5160 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5161 * If this option is turned off, then no mapping will be done of V4 5162 * addresses and a user will receive both PF_INET6 and PF_INET type 5163 * addresses on the socket. 5164 */ 5165 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5166 char __user *optval, int __user *optlen) 5167 { 5168 int val; 5169 struct sctp_sock *sp = sctp_sk(sk); 5170 5171 if (len < sizeof(int)) 5172 return -EINVAL; 5173 5174 len = sizeof(int); 5175 val = sp->v4mapped; 5176 if (put_user(len, optlen)) 5177 return -EFAULT; 5178 if (copy_to_user(optval, &val, len)) 5179 return -EFAULT; 5180 5181 return 0; 5182 } 5183 5184 /* 5185 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 5186 * (chapter and verse is quoted at sctp_setsockopt_context()) 5187 */ 5188 static int sctp_getsockopt_context(struct sock *sk, int len, 5189 char __user *optval, int __user *optlen) 5190 { 5191 struct sctp_assoc_value params; 5192 struct sctp_sock *sp; 5193 struct sctp_association *asoc; 5194 5195 if (len < sizeof(struct sctp_assoc_value)) 5196 return -EINVAL; 5197 5198 len = sizeof(struct sctp_assoc_value); 5199 5200 if (copy_from_user(¶ms, optval, len)) 5201 return -EFAULT; 5202 5203 sp = sctp_sk(sk); 5204 5205 if (params.assoc_id != 0) { 5206 asoc = sctp_id2assoc(sk, params.assoc_id); 5207 if (!asoc) 5208 return -EINVAL; 5209 params.assoc_value = asoc->default_rcv_context; 5210 } else { 5211 params.assoc_value = sp->default_rcv_context; 5212 } 5213 5214 if (put_user(len, optlen)) 5215 return -EFAULT; 5216 if (copy_to_user(optval, ¶ms, len)) 5217 return -EFAULT; 5218 5219 return 0; 5220 } 5221 5222 /* 5223 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5224 * This option will get or set the maximum size to put in any outgoing 5225 * SCTP DATA chunk. If a message is larger than this size it will be 5226 * fragmented by SCTP into the specified size. Note that the underlying 5227 * SCTP implementation may fragment into smaller sized chunks when the 5228 * PMTU of the underlying association is smaller than the value set by 5229 * the user. The default value for this option is '0' which indicates 5230 * the user is NOT limiting fragmentation and only the PMTU will effect 5231 * SCTP's choice of DATA chunk size. Note also that values set larger 5232 * than the maximum size of an IP datagram will effectively let SCTP 5233 * control fragmentation (i.e. the same as setting this option to 0). 5234 * 5235 * The following structure is used to access and modify this parameter: 5236 * 5237 * struct sctp_assoc_value { 5238 * sctp_assoc_t assoc_id; 5239 * uint32_t assoc_value; 5240 * }; 5241 * 5242 * assoc_id: This parameter is ignored for one-to-one style sockets. 5243 * For one-to-many style sockets this parameter indicates which 5244 * association the user is performing an action upon. Note that if 5245 * this field's value is zero then the endpoints default value is 5246 * changed (effecting future associations only). 5247 * assoc_value: This parameter specifies the maximum size in bytes. 5248 */ 5249 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5250 char __user *optval, int __user *optlen) 5251 { 5252 struct sctp_assoc_value params; 5253 struct sctp_association *asoc; 5254 5255 if (len == sizeof(int)) { 5256 pr_warn_ratelimited(DEPRECATED 5257 "%s (pid %d) " 5258 "Use of int in maxseg socket option.\n" 5259 "Use struct sctp_assoc_value instead\n", 5260 current->comm, task_pid_nr(current)); 5261 params.assoc_id = 0; 5262 } else if (len >= sizeof(struct sctp_assoc_value)) { 5263 len = sizeof(struct sctp_assoc_value); 5264 if (copy_from_user(¶ms, optval, sizeof(params))) 5265 return -EFAULT; 5266 } else 5267 return -EINVAL; 5268 5269 asoc = sctp_id2assoc(sk, params.assoc_id); 5270 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5271 return -EINVAL; 5272 5273 if (asoc) 5274 params.assoc_value = asoc->frag_point; 5275 else 5276 params.assoc_value = sctp_sk(sk)->user_frag; 5277 5278 if (put_user(len, optlen)) 5279 return -EFAULT; 5280 if (len == sizeof(int)) { 5281 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5282 return -EFAULT; 5283 } else { 5284 if (copy_to_user(optval, ¶ms, len)) 5285 return -EFAULT; 5286 } 5287 5288 return 0; 5289 } 5290 5291 /* 5292 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5293 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5294 */ 5295 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5296 char __user *optval, int __user *optlen) 5297 { 5298 int val; 5299 5300 if (len < sizeof(int)) 5301 return -EINVAL; 5302 5303 len = sizeof(int); 5304 5305 val = sctp_sk(sk)->frag_interleave; 5306 if (put_user(len, optlen)) 5307 return -EFAULT; 5308 if (copy_to_user(optval, &val, len)) 5309 return -EFAULT; 5310 5311 return 0; 5312 } 5313 5314 /* 5315 * 7.1.25. Set or Get the sctp partial delivery point 5316 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5317 */ 5318 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5319 char __user *optval, 5320 int __user *optlen) 5321 { 5322 u32 val; 5323 5324 if (len < sizeof(u32)) 5325 return -EINVAL; 5326 5327 len = sizeof(u32); 5328 5329 val = sctp_sk(sk)->pd_point; 5330 if (put_user(len, optlen)) 5331 return -EFAULT; 5332 if (copy_to_user(optval, &val, len)) 5333 return -EFAULT; 5334 5335 return 0; 5336 } 5337 5338 /* 5339 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 5340 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5341 */ 5342 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5343 char __user *optval, 5344 int __user *optlen) 5345 { 5346 struct sctp_assoc_value params; 5347 struct sctp_sock *sp; 5348 struct sctp_association *asoc; 5349 5350 if (len == sizeof(int)) { 5351 pr_warn_ratelimited(DEPRECATED 5352 "%s (pid %d) " 5353 "Use of int in max_burst socket option.\n" 5354 "Use struct sctp_assoc_value instead\n", 5355 current->comm, task_pid_nr(current)); 5356 params.assoc_id = 0; 5357 } else if (len >= sizeof(struct sctp_assoc_value)) { 5358 len = sizeof(struct sctp_assoc_value); 5359 if (copy_from_user(¶ms, optval, len)) 5360 return -EFAULT; 5361 } else 5362 return -EINVAL; 5363 5364 sp = sctp_sk(sk); 5365 5366 if (params.assoc_id != 0) { 5367 asoc = sctp_id2assoc(sk, params.assoc_id); 5368 if (!asoc) 5369 return -EINVAL; 5370 params.assoc_value = asoc->max_burst; 5371 } else 5372 params.assoc_value = sp->max_burst; 5373 5374 if (len == sizeof(int)) { 5375 if (copy_to_user(optval, ¶ms.assoc_value, len)) 5376 return -EFAULT; 5377 } else { 5378 if (copy_to_user(optval, ¶ms, len)) 5379 return -EFAULT; 5380 } 5381 5382 return 0; 5383 5384 } 5385 5386 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5387 char __user *optval, int __user *optlen) 5388 { 5389 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5390 struct sctp_hmacalgo __user *p = (void __user *)optval; 5391 struct sctp_hmac_algo_param *hmacs; 5392 __u16 data_len = 0; 5393 u32 num_idents; 5394 5395 if (!ep->auth_enable) 5396 return -EACCES; 5397 5398 hmacs = ep->auth_hmacs_list; 5399 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5400 5401 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5402 return -EINVAL; 5403 5404 len = sizeof(struct sctp_hmacalgo) + data_len; 5405 num_idents = data_len / sizeof(u16); 5406 5407 if (put_user(len, optlen)) 5408 return -EFAULT; 5409 if (put_user(num_idents, &p->shmac_num_idents)) 5410 return -EFAULT; 5411 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5412 return -EFAULT; 5413 return 0; 5414 } 5415 5416 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5417 char __user *optval, int __user *optlen) 5418 { 5419 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5420 struct sctp_authkeyid val; 5421 struct sctp_association *asoc; 5422 5423 if (!ep->auth_enable) 5424 return -EACCES; 5425 5426 if (len < sizeof(struct sctp_authkeyid)) 5427 return -EINVAL; 5428 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5429 return -EFAULT; 5430 5431 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5432 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5433 return -EINVAL; 5434 5435 if (asoc) 5436 val.scact_keynumber = asoc->active_key_id; 5437 else 5438 val.scact_keynumber = ep->active_key_id; 5439 5440 len = sizeof(struct sctp_authkeyid); 5441 if (put_user(len, optlen)) 5442 return -EFAULT; 5443 if (copy_to_user(optval, &val, len)) 5444 return -EFAULT; 5445 5446 return 0; 5447 } 5448 5449 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5450 char __user *optval, int __user *optlen) 5451 { 5452 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5453 struct sctp_authchunks __user *p = (void __user *)optval; 5454 struct sctp_authchunks val; 5455 struct sctp_association *asoc; 5456 struct sctp_chunks_param *ch; 5457 u32 num_chunks = 0; 5458 char __user *to; 5459 5460 if (!ep->auth_enable) 5461 return -EACCES; 5462 5463 if (len < sizeof(struct sctp_authchunks)) 5464 return -EINVAL; 5465 5466 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5467 return -EFAULT; 5468 5469 to = p->gauth_chunks; 5470 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5471 if (!asoc) 5472 return -EINVAL; 5473 5474 ch = asoc->peer.peer_chunks; 5475 if (!ch) 5476 goto num; 5477 5478 /* See if the user provided enough room for all the data */ 5479 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5480 if (len < num_chunks) 5481 return -EINVAL; 5482 5483 if (copy_to_user(to, ch->chunks, num_chunks)) 5484 return -EFAULT; 5485 num: 5486 len = sizeof(struct sctp_authchunks) + num_chunks; 5487 if (put_user(len, optlen)) 5488 return -EFAULT; 5489 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5490 return -EFAULT; 5491 return 0; 5492 } 5493 5494 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5495 char __user *optval, int __user *optlen) 5496 { 5497 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5498 struct sctp_authchunks __user *p = (void __user *)optval; 5499 struct sctp_authchunks val; 5500 struct sctp_association *asoc; 5501 struct sctp_chunks_param *ch; 5502 u32 num_chunks = 0; 5503 char __user *to; 5504 5505 if (!ep->auth_enable) 5506 return -EACCES; 5507 5508 if (len < sizeof(struct sctp_authchunks)) 5509 return -EINVAL; 5510 5511 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5512 return -EFAULT; 5513 5514 to = p->gauth_chunks; 5515 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5516 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5517 return -EINVAL; 5518 5519 if (asoc) 5520 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5521 else 5522 ch = ep->auth_chunk_list; 5523 5524 if (!ch) 5525 goto num; 5526 5527 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5528 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5529 return -EINVAL; 5530 5531 if (copy_to_user(to, ch->chunks, num_chunks)) 5532 return -EFAULT; 5533 num: 5534 len = sizeof(struct sctp_authchunks) + num_chunks; 5535 if (put_user(len, optlen)) 5536 return -EFAULT; 5537 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5538 return -EFAULT; 5539 5540 return 0; 5541 } 5542 5543 /* 5544 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5545 * This option gets the current number of associations that are attached 5546 * to a one-to-many style socket. The option value is an uint32_t. 5547 */ 5548 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5549 char __user *optval, int __user *optlen) 5550 { 5551 struct sctp_sock *sp = sctp_sk(sk); 5552 struct sctp_association *asoc; 5553 u32 val = 0; 5554 5555 if (sctp_style(sk, TCP)) 5556 return -EOPNOTSUPP; 5557 5558 if (len < sizeof(u32)) 5559 return -EINVAL; 5560 5561 len = sizeof(u32); 5562 5563 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5564 val++; 5565 } 5566 5567 if (put_user(len, optlen)) 5568 return -EFAULT; 5569 if (copy_to_user(optval, &val, len)) 5570 return -EFAULT; 5571 5572 return 0; 5573 } 5574 5575 /* 5576 * 8.1.23 SCTP_AUTO_ASCONF 5577 * See the corresponding setsockopt entry as description 5578 */ 5579 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5580 char __user *optval, int __user *optlen) 5581 { 5582 int val = 0; 5583 5584 if (len < sizeof(int)) 5585 return -EINVAL; 5586 5587 len = sizeof(int); 5588 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5589 val = 1; 5590 if (put_user(len, optlen)) 5591 return -EFAULT; 5592 if (copy_to_user(optval, &val, len)) 5593 return -EFAULT; 5594 return 0; 5595 } 5596 5597 /* 5598 * 8.2.6. Get the Current Identifiers of Associations 5599 * (SCTP_GET_ASSOC_ID_LIST) 5600 * 5601 * This option gets the current list of SCTP association identifiers of 5602 * the SCTP associations handled by a one-to-many style socket. 5603 */ 5604 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5605 char __user *optval, int __user *optlen) 5606 { 5607 struct sctp_sock *sp = sctp_sk(sk); 5608 struct sctp_association *asoc; 5609 struct sctp_assoc_ids *ids; 5610 u32 num = 0; 5611 5612 if (sctp_style(sk, TCP)) 5613 return -EOPNOTSUPP; 5614 5615 if (len < sizeof(struct sctp_assoc_ids)) 5616 return -EINVAL; 5617 5618 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5619 num++; 5620 } 5621 5622 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5623 return -EINVAL; 5624 5625 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5626 5627 ids = kmalloc(len, GFP_KERNEL); 5628 if (unlikely(!ids)) 5629 return -ENOMEM; 5630 5631 ids->gaids_number_of_ids = num; 5632 num = 0; 5633 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5634 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5635 } 5636 5637 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5638 kfree(ids); 5639 return -EFAULT; 5640 } 5641 5642 kfree(ids); 5643 return 0; 5644 } 5645 5646 /* 5647 * SCTP_PEER_ADDR_THLDS 5648 * 5649 * This option allows us to fetch the partially failed threshold for one or all 5650 * transports in an association. See Section 6.1 of: 5651 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5652 */ 5653 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5654 char __user *optval, 5655 int len, 5656 int __user *optlen) 5657 { 5658 struct sctp_paddrthlds val; 5659 struct sctp_transport *trans; 5660 struct sctp_association *asoc; 5661 5662 if (len < sizeof(struct sctp_paddrthlds)) 5663 return -EINVAL; 5664 len = sizeof(struct sctp_paddrthlds); 5665 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5666 return -EFAULT; 5667 5668 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5669 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5670 if (!asoc) 5671 return -ENOENT; 5672 5673 val.spt_pathpfthld = asoc->pf_retrans; 5674 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5675 } else { 5676 trans = sctp_addr_id2transport(sk, &val.spt_address, 5677 val.spt_assoc_id); 5678 if (!trans) 5679 return -ENOENT; 5680 5681 val.spt_pathmaxrxt = trans->pathmaxrxt; 5682 val.spt_pathpfthld = trans->pf_retrans; 5683 } 5684 5685 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5686 return -EFAULT; 5687 5688 return 0; 5689 } 5690 5691 /* 5692 * SCTP_GET_ASSOC_STATS 5693 * 5694 * This option retrieves local per endpoint statistics. It is modeled 5695 * after OpenSolaris' implementation 5696 */ 5697 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5698 char __user *optval, 5699 int __user *optlen) 5700 { 5701 struct sctp_assoc_stats sas; 5702 struct sctp_association *asoc = NULL; 5703 5704 /* User must provide at least the assoc id */ 5705 if (len < sizeof(sctp_assoc_t)) 5706 return -EINVAL; 5707 5708 /* Allow the struct to grow and fill in as much as possible */ 5709 len = min_t(size_t, len, sizeof(sas)); 5710 5711 if (copy_from_user(&sas, optval, len)) 5712 return -EFAULT; 5713 5714 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5715 if (!asoc) 5716 return -EINVAL; 5717 5718 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5719 sas.sas_gapcnt = asoc->stats.gapcnt; 5720 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5721 sas.sas_osacks = asoc->stats.osacks; 5722 sas.sas_isacks = asoc->stats.isacks; 5723 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5724 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5725 sas.sas_oodchunks = asoc->stats.oodchunks; 5726 sas.sas_iodchunks = asoc->stats.iodchunks; 5727 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5728 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5729 sas.sas_idupchunks = asoc->stats.idupchunks; 5730 sas.sas_opackets = asoc->stats.opackets; 5731 sas.sas_ipackets = asoc->stats.ipackets; 5732 5733 /* New high max rto observed, will return 0 if not a single 5734 * RTO update took place. obs_rto_ipaddr will be bogus 5735 * in such a case 5736 */ 5737 sas.sas_maxrto = asoc->stats.max_obs_rto; 5738 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5739 sizeof(struct sockaddr_storage)); 5740 5741 /* Mark beginning of a new observation period */ 5742 asoc->stats.max_obs_rto = asoc->rto_min; 5743 5744 if (put_user(len, optlen)) 5745 return -EFAULT; 5746 5747 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5748 5749 if (copy_to_user(optval, &sas, len)) 5750 return -EFAULT; 5751 5752 return 0; 5753 } 5754 5755 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5756 char __user *optval, int __user *optlen) 5757 { 5758 int retval = 0; 5759 int len; 5760 5761 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5762 5763 /* I can hardly begin to describe how wrong this is. This is 5764 * so broken as to be worse than useless. The API draft 5765 * REALLY is NOT helpful here... I am not convinced that the 5766 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5767 * are at all well-founded. 5768 */ 5769 if (level != SOL_SCTP) { 5770 struct sctp_af *af = sctp_sk(sk)->pf->af; 5771 5772 retval = af->getsockopt(sk, level, optname, optval, optlen); 5773 return retval; 5774 } 5775 5776 if (get_user(len, optlen)) 5777 return -EFAULT; 5778 5779 lock_sock(sk); 5780 5781 switch (optname) { 5782 case SCTP_STATUS: 5783 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5784 break; 5785 case SCTP_DISABLE_FRAGMENTS: 5786 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5787 optlen); 5788 break; 5789 case SCTP_EVENTS: 5790 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5791 break; 5792 case SCTP_AUTOCLOSE: 5793 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5794 break; 5795 case SCTP_SOCKOPT_PEELOFF: 5796 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5797 break; 5798 case SCTP_PEER_ADDR_PARAMS: 5799 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5800 optlen); 5801 break; 5802 case SCTP_DELAYED_SACK: 5803 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5804 optlen); 5805 break; 5806 case SCTP_INITMSG: 5807 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5808 break; 5809 case SCTP_GET_PEER_ADDRS: 5810 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5811 optlen); 5812 break; 5813 case SCTP_GET_LOCAL_ADDRS: 5814 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5815 optlen); 5816 break; 5817 case SCTP_SOCKOPT_CONNECTX3: 5818 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 5819 break; 5820 case SCTP_DEFAULT_SEND_PARAM: 5821 retval = sctp_getsockopt_default_send_param(sk, len, 5822 optval, optlen); 5823 break; 5824 case SCTP_PRIMARY_ADDR: 5825 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 5826 break; 5827 case SCTP_NODELAY: 5828 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 5829 break; 5830 case SCTP_RTOINFO: 5831 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 5832 break; 5833 case SCTP_ASSOCINFO: 5834 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 5835 break; 5836 case SCTP_I_WANT_MAPPED_V4_ADDR: 5837 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 5838 break; 5839 case SCTP_MAXSEG: 5840 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 5841 break; 5842 case SCTP_GET_PEER_ADDR_INFO: 5843 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 5844 optlen); 5845 break; 5846 case SCTP_ADAPTATION_LAYER: 5847 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 5848 optlen); 5849 break; 5850 case SCTP_CONTEXT: 5851 retval = sctp_getsockopt_context(sk, len, optval, optlen); 5852 break; 5853 case SCTP_FRAGMENT_INTERLEAVE: 5854 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 5855 optlen); 5856 break; 5857 case SCTP_PARTIAL_DELIVERY_POINT: 5858 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 5859 optlen); 5860 break; 5861 case SCTP_MAX_BURST: 5862 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 5863 break; 5864 case SCTP_AUTH_KEY: 5865 case SCTP_AUTH_CHUNK: 5866 case SCTP_AUTH_DELETE_KEY: 5867 retval = -EOPNOTSUPP; 5868 break; 5869 case SCTP_HMAC_IDENT: 5870 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 5871 break; 5872 case SCTP_AUTH_ACTIVE_KEY: 5873 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 5874 break; 5875 case SCTP_PEER_AUTH_CHUNKS: 5876 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 5877 optlen); 5878 break; 5879 case SCTP_LOCAL_AUTH_CHUNKS: 5880 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 5881 optlen); 5882 break; 5883 case SCTP_GET_ASSOC_NUMBER: 5884 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5885 break; 5886 case SCTP_GET_ASSOC_ID_LIST: 5887 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5888 break; 5889 case SCTP_AUTO_ASCONF: 5890 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5891 break; 5892 case SCTP_PEER_ADDR_THLDS: 5893 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5894 break; 5895 case SCTP_GET_ASSOC_STATS: 5896 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 5897 break; 5898 default: 5899 retval = -ENOPROTOOPT; 5900 break; 5901 } 5902 5903 release_sock(sk); 5904 return retval; 5905 } 5906 5907 static void sctp_hash(struct sock *sk) 5908 { 5909 /* STUB */ 5910 } 5911 5912 static void sctp_unhash(struct sock *sk) 5913 { 5914 /* STUB */ 5915 } 5916 5917 /* Check if port is acceptable. Possibly find first available port. 5918 * 5919 * The port hash table (contained in the 'global' SCTP protocol storage 5920 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 5921 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 5922 * list (the list number is the port number hashed out, so as you 5923 * would expect from a hash function, all the ports in a given list have 5924 * such a number that hashes out to the same list number; you were 5925 * expecting that, right?); so each list has a set of ports, with a 5926 * link to the socket (struct sock) that uses it, the port number and 5927 * a fastreuse flag (FIXME: NPI ipg). 5928 */ 5929 static struct sctp_bind_bucket *sctp_bucket_create( 5930 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 5931 5932 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5933 { 5934 struct sctp_bind_hashbucket *head; /* hash list */ 5935 struct sctp_bind_bucket *pp; 5936 unsigned short snum; 5937 int ret; 5938 5939 snum = ntohs(addr->v4.sin_port); 5940 5941 pr_debug("%s: begins, snum:%d\n", __func__, snum); 5942 5943 local_bh_disable(); 5944 5945 if (snum == 0) { 5946 /* Search for an available port. */ 5947 int low, high, remaining, index; 5948 unsigned int rover; 5949 struct net *net = sock_net(sk); 5950 5951 inet_get_local_port_range(net, &low, &high); 5952 remaining = (high - low) + 1; 5953 rover = prandom_u32() % remaining + low; 5954 5955 do { 5956 rover++; 5957 if ((rover < low) || (rover > high)) 5958 rover = low; 5959 if (inet_is_local_reserved_port(net, rover)) 5960 continue; 5961 index = sctp_phashfn(sock_net(sk), rover); 5962 head = &sctp_port_hashtable[index]; 5963 spin_lock(&head->lock); 5964 sctp_for_each_hentry(pp, &head->chain) 5965 if ((pp->port == rover) && 5966 net_eq(sock_net(sk), pp->net)) 5967 goto next; 5968 break; 5969 next: 5970 spin_unlock(&head->lock); 5971 } while (--remaining > 0); 5972 5973 /* Exhausted local port range during search? */ 5974 ret = 1; 5975 if (remaining <= 0) 5976 goto fail; 5977 5978 /* OK, here is the one we will use. HEAD (the port 5979 * hash table list entry) is non-NULL and we hold it's 5980 * mutex. 5981 */ 5982 snum = rover; 5983 } else { 5984 /* We are given an specific port number; we verify 5985 * that it is not being used. If it is used, we will 5986 * exahust the search in the hash list corresponding 5987 * to the port number (snum) - we detect that with the 5988 * port iterator, pp being NULL. 5989 */ 5990 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5991 spin_lock(&head->lock); 5992 sctp_for_each_hentry(pp, &head->chain) { 5993 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5994 goto pp_found; 5995 } 5996 } 5997 pp = NULL; 5998 goto pp_not_found; 5999 pp_found: 6000 if (!hlist_empty(&pp->owner)) { 6001 /* We had a port hash table hit - there is an 6002 * available port (pp != NULL) and it is being 6003 * used by other socket (pp->owner not empty); that other 6004 * socket is going to be sk2. 6005 */ 6006 int reuse = sk->sk_reuse; 6007 struct sock *sk2; 6008 6009 pr_debug("%s: found a possible match\n", __func__); 6010 6011 if (pp->fastreuse && sk->sk_reuse && 6012 sk->sk_state != SCTP_SS_LISTENING) 6013 goto success; 6014 6015 /* Run through the list of sockets bound to the port 6016 * (pp->port) [via the pointers bind_next and 6017 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6018 * we get the endpoint they describe and run through 6019 * the endpoint's list of IP (v4 or v6) addresses, 6020 * comparing each of the addresses with the address of 6021 * the socket sk. If we find a match, then that means 6022 * that this port/socket (sk) combination are already 6023 * in an endpoint. 6024 */ 6025 sk_for_each_bound(sk2, &pp->owner) { 6026 struct sctp_endpoint *ep2; 6027 ep2 = sctp_sk(sk2)->ep; 6028 6029 if (sk == sk2 || 6030 (reuse && sk2->sk_reuse && 6031 sk2->sk_state != SCTP_SS_LISTENING)) 6032 continue; 6033 6034 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6035 sctp_sk(sk2), sctp_sk(sk))) { 6036 ret = (long)sk2; 6037 goto fail_unlock; 6038 } 6039 } 6040 6041 pr_debug("%s: found a match\n", __func__); 6042 } 6043 pp_not_found: 6044 /* If there was a hash table miss, create a new port. */ 6045 ret = 1; 6046 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6047 goto fail_unlock; 6048 6049 /* In either case (hit or miss), make sure fastreuse is 1 only 6050 * if sk->sk_reuse is too (that is, if the caller requested 6051 * SO_REUSEADDR on this socket -sk-). 6052 */ 6053 if (hlist_empty(&pp->owner)) { 6054 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6055 pp->fastreuse = 1; 6056 else 6057 pp->fastreuse = 0; 6058 } else if (pp->fastreuse && 6059 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6060 pp->fastreuse = 0; 6061 6062 /* We are set, so fill up all the data in the hash table 6063 * entry, tie the socket list information with the rest of the 6064 * sockets FIXME: Blurry, NPI (ipg). 6065 */ 6066 success: 6067 if (!sctp_sk(sk)->bind_hash) { 6068 inet_sk(sk)->inet_num = snum; 6069 sk_add_bind_node(sk, &pp->owner); 6070 sctp_sk(sk)->bind_hash = pp; 6071 } 6072 ret = 0; 6073 6074 fail_unlock: 6075 spin_unlock(&head->lock); 6076 6077 fail: 6078 local_bh_enable(); 6079 return ret; 6080 } 6081 6082 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6083 * port is requested. 6084 */ 6085 static int sctp_get_port(struct sock *sk, unsigned short snum) 6086 { 6087 union sctp_addr addr; 6088 struct sctp_af *af = sctp_sk(sk)->pf->af; 6089 6090 /* Set up a dummy address struct from the sk. */ 6091 af->from_sk(&addr, sk); 6092 addr.v4.sin_port = htons(snum); 6093 6094 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6095 return !!sctp_get_port_local(sk, &addr); 6096 } 6097 6098 /* 6099 * Move a socket to LISTENING state. 6100 */ 6101 static int sctp_listen_start(struct sock *sk, int backlog) 6102 { 6103 struct sctp_sock *sp = sctp_sk(sk); 6104 struct sctp_endpoint *ep = sp->ep; 6105 struct crypto_hash *tfm = NULL; 6106 char alg[32]; 6107 6108 /* Allocate HMAC for generating cookie. */ 6109 if (!sp->hmac && sp->sctp_hmac_alg) { 6110 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6111 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6112 if (IS_ERR(tfm)) { 6113 net_info_ratelimited("failed to load transform for %s: %ld\n", 6114 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6115 return -ENOSYS; 6116 } 6117 sctp_sk(sk)->hmac = tfm; 6118 } 6119 6120 /* 6121 * If a bind() or sctp_bindx() is not called prior to a listen() 6122 * call that allows new associations to be accepted, the system 6123 * picks an ephemeral port and will choose an address set equivalent 6124 * to binding with a wildcard address. 6125 * 6126 * This is not currently spelled out in the SCTP sockets 6127 * extensions draft, but follows the practice as seen in TCP 6128 * sockets. 6129 * 6130 */ 6131 sk->sk_state = SCTP_SS_LISTENING; 6132 if (!ep->base.bind_addr.port) { 6133 if (sctp_autobind(sk)) 6134 return -EAGAIN; 6135 } else { 6136 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6137 sk->sk_state = SCTP_SS_CLOSED; 6138 return -EADDRINUSE; 6139 } 6140 } 6141 6142 sk->sk_max_ack_backlog = backlog; 6143 sctp_hash_endpoint(ep); 6144 return 0; 6145 } 6146 6147 /* 6148 * 4.1.3 / 5.1.3 listen() 6149 * 6150 * By default, new associations are not accepted for UDP style sockets. 6151 * An application uses listen() to mark a socket as being able to 6152 * accept new associations. 6153 * 6154 * On TCP style sockets, applications use listen() to ready the SCTP 6155 * endpoint for accepting inbound associations. 6156 * 6157 * On both types of endpoints a backlog of '0' disables listening. 6158 * 6159 * Move a socket to LISTENING state. 6160 */ 6161 int sctp_inet_listen(struct socket *sock, int backlog) 6162 { 6163 struct sock *sk = sock->sk; 6164 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6165 int err = -EINVAL; 6166 6167 if (unlikely(backlog < 0)) 6168 return err; 6169 6170 lock_sock(sk); 6171 6172 /* Peeled-off sockets are not allowed to listen(). */ 6173 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6174 goto out; 6175 6176 if (sock->state != SS_UNCONNECTED) 6177 goto out; 6178 6179 /* If backlog is zero, disable listening. */ 6180 if (!backlog) { 6181 if (sctp_sstate(sk, CLOSED)) 6182 goto out; 6183 6184 err = 0; 6185 sctp_unhash_endpoint(ep); 6186 sk->sk_state = SCTP_SS_CLOSED; 6187 if (sk->sk_reuse) 6188 sctp_sk(sk)->bind_hash->fastreuse = 1; 6189 goto out; 6190 } 6191 6192 /* If we are already listening, just update the backlog */ 6193 if (sctp_sstate(sk, LISTENING)) 6194 sk->sk_max_ack_backlog = backlog; 6195 else { 6196 err = sctp_listen_start(sk, backlog); 6197 if (err) 6198 goto out; 6199 } 6200 6201 err = 0; 6202 out: 6203 release_sock(sk); 6204 return err; 6205 } 6206 6207 /* 6208 * This function is done by modeling the current datagram_poll() and the 6209 * tcp_poll(). Note that, based on these implementations, we don't 6210 * lock the socket in this function, even though it seems that, 6211 * ideally, locking or some other mechanisms can be used to ensure 6212 * the integrity of the counters (sndbuf and wmem_alloc) used 6213 * in this place. We assume that we don't need locks either until proven 6214 * otherwise. 6215 * 6216 * Another thing to note is that we include the Async I/O support 6217 * here, again, by modeling the current TCP/UDP code. We don't have 6218 * a good way to test with it yet. 6219 */ 6220 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6221 { 6222 struct sock *sk = sock->sk; 6223 struct sctp_sock *sp = sctp_sk(sk); 6224 unsigned int mask; 6225 6226 poll_wait(file, sk_sleep(sk), wait); 6227 6228 /* A TCP-style listening socket becomes readable when the accept queue 6229 * is not empty. 6230 */ 6231 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6232 return (!list_empty(&sp->ep->asocs)) ? 6233 (POLLIN | POLLRDNORM) : 0; 6234 6235 mask = 0; 6236 6237 /* Is there any exceptional events? */ 6238 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6239 mask |= POLLERR | 6240 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6241 if (sk->sk_shutdown & RCV_SHUTDOWN) 6242 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6243 if (sk->sk_shutdown == SHUTDOWN_MASK) 6244 mask |= POLLHUP; 6245 6246 /* Is it readable? Reconsider this code with TCP-style support. */ 6247 if (!skb_queue_empty(&sk->sk_receive_queue)) 6248 mask |= POLLIN | POLLRDNORM; 6249 6250 /* The association is either gone or not ready. */ 6251 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6252 return mask; 6253 6254 /* Is it writable? */ 6255 if (sctp_writeable(sk)) { 6256 mask |= POLLOUT | POLLWRNORM; 6257 } else { 6258 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6259 /* 6260 * Since the socket is not locked, the buffer 6261 * might be made available after the writeable check and 6262 * before the bit is set. This could cause a lost I/O 6263 * signal. tcp_poll() has a race breaker for this race 6264 * condition. Based on their implementation, we put 6265 * in the following code to cover it as well. 6266 */ 6267 if (sctp_writeable(sk)) 6268 mask |= POLLOUT | POLLWRNORM; 6269 } 6270 return mask; 6271 } 6272 6273 /******************************************************************** 6274 * 2nd Level Abstractions 6275 ********************************************************************/ 6276 6277 static struct sctp_bind_bucket *sctp_bucket_create( 6278 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6279 { 6280 struct sctp_bind_bucket *pp; 6281 6282 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6283 if (pp) { 6284 SCTP_DBG_OBJCNT_INC(bind_bucket); 6285 pp->port = snum; 6286 pp->fastreuse = 0; 6287 INIT_HLIST_HEAD(&pp->owner); 6288 pp->net = net; 6289 hlist_add_head(&pp->node, &head->chain); 6290 } 6291 return pp; 6292 } 6293 6294 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6295 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6296 { 6297 if (pp && hlist_empty(&pp->owner)) { 6298 __hlist_del(&pp->node); 6299 kmem_cache_free(sctp_bucket_cachep, pp); 6300 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6301 } 6302 } 6303 6304 /* Release this socket's reference to a local port. */ 6305 static inline void __sctp_put_port(struct sock *sk) 6306 { 6307 struct sctp_bind_hashbucket *head = 6308 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6309 inet_sk(sk)->inet_num)]; 6310 struct sctp_bind_bucket *pp; 6311 6312 spin_lock(&head->lock); 6313 pp = sctp_sk(sk)->bind_hash; 6314 __sk_del_bind_node(sk); 6315 sctp_sk(sk)->bind_hash = NULL; 6316 inet_sk(sk)->inet_num = 0; 6317 sctp_bucket_destroy(pp); 6318 spin_unlock(&head->lock); 6319 } 6320 6321 void sctp_put_port(struct sock *sk) 6322 { 6323 local_bh_disable(); 6324 __sctp_put_port(sk); 6325 local_bh_enable(); 6326 } 6327 6328 /* 6329 * The system picks an ephemeral port and choose an address set equivalent 6330 * to binding with a wildcard address. 6331 * One of those addresses will be the primary address for the association. 6332 * This automatically enables the multihoming capability of SCTP. 6333 */ 6334 static int sctp_autobind(struct sock *sk) 6335 { 6336 union sctp_addr autoaddr; 6337 struct sctp_af *af; 6338 __be16 port; 6339 6340 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6341 af = sctp_sk(sk)->pf->af; 6342 6343 port = htons(inet_sk(sk)->inet_num); 6344 af->inaddr_any(&autoaddr, port); 6345 6346 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6347 } 6348 6349 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6350 * 6351 * From RFC 2292 6352 * 4.2 The cmsghdr Structure * 6353 * 6354 * When ancillary data is sent or received, any number of ancillary data 6355 * objects can be specified by the msg_control and msg_controllen members of 6356 * the msghdr structure, because each object is preceded by 6357 * a cmsghdr structure defining the object's length (the cmsg_len member). 6358 * Historically Berkeley-derived implementations have passed only one object 6359 * at a time, but this API allows multiple objects to be 6360 * passed in a single call to sendmsg() or recvmsg(). The following example 6361 * shows two ancillary data objects in a control buffer. 6362 * 6363 * |<--------------------------- msg_controllen -------------------------->| 6364 * | | 6365 * 6366 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6367 * 6368 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6369 * | | | 6370 * 6371 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6372 * 6373 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6374 * | | | | | 6375 * 6376 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6377 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6378 * 6379 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6380 * 6381 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6382 * ^ 6383 * | 6384 * 6385 * msg_control 6386 * points here 6387 */ 6388 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6389 { 6390 struct cmsghdr *cmsg; 6391 struct msghdr *my_msg = (struct msghdr *)msg; 6392 6393 for (cmsg = CMSG_FIRSTHDR(msg); 6394 cmsg != NULL; 6395 cmsg = CMSG_NXTHDR(my_msg, cmsg)) { 6396 if (!CMSG_OK(my_msg, cmsg)) 6397 return -EINVAL; 6398 6399 /* Should we parse this header or ignore? */ 6400 if (cmsg->cmsg_level != IPPROTO_SCTP) 6401 continue; 6402 6403 /* Strictly check lengths following example in SCM code. */ 6404 switch (cmsg->cmsg_type) { 6405 case SCTP_INIT: 6406 /* SCTP Socket API Extension 6407 * 5.2.1 SCTP Initiation Structure (SCTP_INIT) 6408 * 6409 * This cmsghdr structure provides information for 6410 * initializing new SCTP associations with sendmsg(). 6411 * The SCTP_INITMSG socket option uses this same data 6412 * structure. This structure is not used for 6413 * recvmsg(). 6414 * 6415 * cmsg_level cmsg_type cmsg_data[] 6416 * ------------ ------------ ---------------------- 6417 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6418 */ 6419 if (cmsg->cmsg_len != 6420 CMSG_LEN(sizeof(struct sctp_initmsg))) 6421 return -EINVAL; 6422 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); 6423 break; 6424 6425 case SCTP_SNDRCV: 6426 /* SCTP Socket API Extension 6427 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) 6428 * 6429 * This cmsghdr structure specifies SCTP options for 6430 * sendmsg() and describes SCTP header information 6431 * about a received message through recvmsg(). 6432 * 6433 * cmsg_level cmsg_type cmsg_data[] 6434 * ------------ ------------ ---------------------- 6435 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6436 */ 6437 if (cmsg->cmsg_len != 6438 CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6439 return -EINVAL; 6440 6441 cmsgs->info = 6442 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 6443 6444 /* Minimally, validate the sinfo_flags. */ 6445 if (cmsgs->info->sinfo_flags & 6446 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6447 SCTP_ABORT | SCTP_EOF)) 6448 return -EINVAL; 6449 break; 6450 6451 default: 6452 return -EINVAL; 6453 } 6454 } 6455 return 0; 6456 } 6457 6458 /* 6459 * Wait for a packet.. 6460 * Note: This function is the same function as in core/datagram.c 6461 * with a few modifications to make lksctp work. 6462 */ 6463 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 6464 { 6465 int error; 6466 DEFINE_WAIT(wait); 6467 6468 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6469 6470 /* Socket errors? */ 6471 error = sock_error(sk); 6472 if (error) 6473 goto out; 6474 6475 if (!skb_queue_empty(&sk->sk_receive_queue)) 6476 goto ready; 6477 6478 /* Socket shut down? */ 6479 if (sk->sk_shutdown & RCV_SHUTDOWN) 6480 goto out; 6481 6482 /* Sequenced packets can come disconnected. If so we report the 6483 * problem. 6484 */ 6485 error = -ENOTCONN; 6486 6487 /* Is there a good reason to think that we may receive some data? */ 6488 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6489 goto out; 6490 6491 /* Handle signals. */ 6492 if (signal_pending(current)) 6493 goto interrupted; 6494 6495 /* Let another process have a go. Since we are going to sleep 6496 * anyway. Note: This may cause odd behaviors if the message 6497 * does not fit in the user's buffer, but this seems to be the 6498 * only way to honor MSG_DONTWAIT realistically. 6499 */ 6500 release_sock(sk); 6501 *timeo_p = schedule_timeout(*timeo_p); 6502 lock_sock(sk); 6503 6504 ready: 6505 finish_wait(sk_sleep(sk), &wait); 6506 return 0; 6507 6508 interrupted: 6509 error = sock_intr_errno(*timeo_p); 6510 6511 out: 6512 finish_wait(sk_sleep(sk), &wait); 6513 *err = error; 6514 return error; 6515 } 6516 6517 /* Receive a datagram. 6518 * Note: This is pretty much the same routine as in core/datagram.c 6519 * with a few changes to make lksctp work. 6520 */ 6521 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6522 int noblock, int *err) 6523 { 6524 int error; 6525 struct sk_buff *skb; 6526 long timeo; 6527 6528 timeo = sock_rcvtimeo(sk, noblock); 6529 6530 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6531 MAX_SCHEDULE_TIMEOUT); 6532 6533 do { 6534 /* Again only user level code calls this function, 6535 * so nothing interrupt level 6536 * will suddenly eat the receive_queue. 6537 * 6538 * Look at current nfs client by the way... 6539 * However, this function was correct in any case. 8) 6540 */ 6541 if (flags & MSG_PEEK) { 6542 spin_lock_bh(&sk->sk_receive_queue.lock); 6543 skb = skb_peek(&sk->sk_receive_queue); 6544 if (skb) 6545 atomic_inc(&skb->users); 6546 spin_unlock_bh(&sk->sk_receive_queue.lock); 6547 } else { 6548 skb = skb_dequeue(&sk->sk_receive_queue); 6549 } 6550 6551 if (skb) 6552 return skb; 6553 6554 /* Caller is allowed not to check sk->sk_err before calling. */ 6555 error = sock_error(sk); 6556 if (error) 6557 goto no_packet; 6558 6559 if (sk->sk_shutdown & RCV_SHUTDOWN) 6560 break; 6561 6562 if (sk_can_busy_loop(sk) && 6563 sk_busy_loop(sk, noblock)) 6564 continue; 6565 6566 /* User doesn't want to wait. */ 6567 error = -EAGAIN; 6568 if (!timeo) 6569 goto no_packet; 6570 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6571 6572 return NULL; 6573 6574 no_packet: 6575 *err = error; 6576 return NULL; 6577 } 6578 6579 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6580 static void __sctp_write_space(struct sctp_association *asoc) 6581 { 6582 struct sock *sk = asoc->base.sk; 6583 struct socket *sock = sk->sk_socket; 6584 6585 if ((sctp_wspace(asoc) > 0) && sock) { 6586 if (waitqueue_active(&asoc->wait)) 6587 wake_up_interruptible(&asoc->wait); 6588 6589 if (sctp_writeable(sk)) { 6590 wait_queue_head_t *wq = sk_sleep(sk); 6591 6592 if (wq && waitqueue_active(wq)) 6593 wake_up_interruptible(wq); 6594 6595 /* Note that we try to include the Async I/O support 6596 * here by modeling from the current TCP/UDP code. 6597 * We have not tested with it yet. 6598 */ 6599 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6600 sock_wake_async(sock, 6601 SOCK_WAKE_SPACE, POLL_OUT); 6602 } 6603 } 6604 } 6605 6606 static void sctp_wake_up_waiters(struct sock *sk, 6607 struct sctp_association *asoc) 6608 { 6609 struct sctp_association *tmp = asoc; 6610 6611 /* We do accounting for the sndbuf space per association, 6612 * so we only need to wake our own association. 6613 */ 6614 if (asoc->ep->sndbuf_policy) 6615 return __sctp_write_space(asoc); 6616 6617 /* If association goes down and is just flushing its 6618 * outq, then just normally notify others. 6619 */ 6620 if (asoc->base.dead) 6621 return sctp_write_space(sk); 6622 6623 /* Accounting for the sndbuf space is per socket, so we 6624 * need to wake up others, try to be fair and in case of 6625 * other associations, let them have a go first instead 6626 * of just doing a sctp_write_space() call. 6627 * 6628 * Note that we reach sctp_wake_up_waiters() only when 6629 * associations free up queued chunks, thus we are under 6630 * lock and the list of associations on a socket is 6631 * guaranteed not to change. 6632 */ 6633 for (tmp = list_next_entry(tmp, asocs); 1; 6634 tmp = list_next_entry(tmp, asocs)) { 6635 /* Manually skip the head element. */ 6636 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 6637 continue; 6638 /* Wake up association. */ 6639 __sctp_write_space(tmp); 6640 /* We've reached the end. */ 6641 if (tmp == asoc) 6642 break; 6643 } 6644 } 6645 6646 /* Do accounting for the sndbuf space. 6647 * Decrement the used sndbuf space of the corresponding association by the 6648 * data size which was just transmitted(freed). 6649 */ 6650 static void sctp_wfree(struct sk_buff *skb) 6651 { 6652 struct sctp_association *asoc; 6653 struct sctp_chunk *chunk; 6654 struct sock *sk; 6655 6656 /* Get the saved chunk pointer. */ 6657 chunk = *((struct sctp_chunk **)(skb->cb)); 6658 asoc = chunk->asoc; 6659 sk = asoc->base.sk; 6660 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6661 sizeof(struct sk_buff) + 6662 sizeof(struct sctp_chunk); 6663 6664 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6665 6666 /* 6667 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6668 */ 6669 sk->sk_wmem_queued -= skb->truesize; 6670 sk_mem_uncharge(sk, skb->truesize); 6671 6672 sock_wfree(skb); 6673 sctp_wake_up_waiters(sk, asoc); 6674 6675 sctp_association_put(asoc); 6676 } 6677 6678 /* Do accounting for the receive space on the socket. 6679 * Accounting for the association is done in ulpevent.c 6680 * We set this as a destructor for the cloned data skbs so that 6681 * accounting is done at the correct time. 6682 */ 6683 void sctp_sock_rfree(struct sk_buff *skb) 6684 { 6685 struct sock *sk = skb->sk; 6686 struct sctp_ulpevent *event = sctp_skb2event(skb); 6687 6688 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6689 6690 /* 6691 * Mimic the behavior of sock_rfree 6692 */ 6693 sk_mem_uncharge(sk, event->rmem_len); 6694 } 6695 6696 6697 /* Helper function to wait for space in the sndbuf. */ 6698 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6699 size_t msg_len) 6700 { 6701 struct sock *sk = asoc->base.sk; 6702 int err = 0; 6703 long current_timeo = *timeo_p; 6704 DEFINE_WAIT(wait); 6705 6706 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6707 *timeo_p, msg_len); 6708 6709 /* Increment the association's refcnt. */ 6710 sctp_association_hold(asoc); 6711 6712 /* Wait on the association specific sndbuf space. */ 6713 for (;;) { 6714 prepare_to_wait_exclusive(&asoc->wait, &wait, 6715 TASK_INTERRUPTIBLE); 6716 if (!*timeo_p) 6717 goto do_nonblock; 6718 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6719 asoc->base.dead) 6720 goto do_error; 6721 if (signal_pending(current)) 6722 goto do_interrupted; 6723 if (msg_len <= sctp_wspace(asoc)) 6724 break; 6725 6726 /* Let another process have a go. Since we are going 6727 * to sleep anyway. 6728 */ 6729 release_sock(sk); 6730 current_timeo = schedule_timeout(current_timeo); 6731 BUG_ON(sk != asoc->base.sk); 6732 lock_sock(sk); 6733 6734 *timeo_p = current_timeo; 6735 } 6736 6737 out: 6738 finish_wait(&asoc->wait, &wait); 6739 6740 /* Release the association's refcnt. */ 6741 sctp_association_put(asoc); 6742 6743 return err; 6744 6745 do_error: 6746 err = -EPIPE; 6747 goto out; 6748 6749 do_interrupted: 6750 err = sock_intr_errno(*timeo_p); 6751 goto out; 6752 6753 do_nonblock: 6754 err = -EAGAIN; 6755 goto out; 6756 } 6757 6758 void sctp_data_ready(struct sock *sk) 6759 { 6760 struct socket_wq *wq; 6761 6762 rcu_read_lock(); 6763 wq = rcu_dereference(sk->sk_wq); 6764 if (wq_has_sleeper(wq)) 6765 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6766 POLLRDNORM | POLLRDBAND); 6767 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6768 rcu_read_unlock(); 6769 } 6770 6771 /* If socket sndbuf has changed, wake up all per association waiters. */ 6772 void sctp_write_space(struct sock *sk) 6773 { 6774 struct sctp_association *asoc; 6775 6776 /* Wake up the tasks in each wait queue. */ 6777 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6778 __sctp_write_space(asoc); 6779 } 6780 } 6781 6782 /* Is there any sndbuf space available on the socket? 6783 * 6784 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 6785 * associations on the same socket. For a UDP-style socket with 6786 * multiple associations, it is possible for it to be "unwriteable" 6787 * prematurely. I assume that this is acceptable because 6788 * a premature "unwriteable" is better than an accidental "writeable" which 6789 * would cause an unwanted block under certain circumstances. For the 1-1 6790 * UDP-style sockets or TCP-style sockets, this code should work. 6791 * - Daisy 6792 */ 6793 static int sctp_writeable(struct sock *sk) 6794 { 6795 int amt = 0; 6796 6797 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 6798 if (amt < 0) 6799 amt = 0; 6800 return amt; 6801 } 6802 6803 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 6804 * returns immediately with EINPROGRESS. 6805 */ 6806 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 6807 { 6808 struct sock *sk = asoc->base.sk; 6809 int err = 0; 6810 long current_timeo = *timeo_p; 6811 DEFINE_WAIT(wait); 6812 6813 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 6814 6815 /* Increment the association's refcnt. */ 6816 sctp_association_hold(asoc); 6817 6818 for (;;) { 6819 prepare_to_wait_exclusive(&asoc->wait, &wait, 6820 TASK_INTERRUPTIBLE); 6821 if (!*timeo_p) 6822 goto do_nonblock; 6823 if (sk->sk_shutdown & RCV_SHUTDOWN) 6824 break; 6825 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6826 asoc->base.dead) 6827 goto do_error; 6828 if (signal_pending(current)) 6829 goto do_interrupted; 6830 6831 if (sctp_state(asoc, ESTABLISHED)) 6832 break; 6833 6834 /* Let another process have a go. Since we are going 6835 * to sleep anyway. 6836 */ 6837 release_sock(sk); 6838 current_timeo = schedule_timeout(current_timeo); 6839 lock_sock(sk); 6840 6841 *timeo_p = current_timeo; 6842 } 6843 6844 out: 6845 finish_wait(&asoc->wait, &wait); 6846 6847 /* Release the association's refcnt. */ 6848 sctp_association_put(asoc); 6849 6850 return err; 6851 6852 do_error: 6853 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 6854 err = -ETIMEDOUT; 6855 else 6856 err = -ECONNREFUSED; 6857 goto out; 6858 6859 do_interrupted: 6860 err = sock_intr_errno(*timeo_p); 6861 goto out; 6862 6863 do_nonblock: 6864 err = -EINPROGRESS; 6865 goto out; 6866 } 6867 6868 static int sctp_wait_for_accept(struct sock *sk, long timeo) 6869 { 6870 struct sctp_endpoint *ep; 6871 int err = 0; 6872 DEFINE_WAIT(wait); 6873 6874 ep = sctp_sk(sk)->ep; 6875 6876 6877 for (;;) { 6878 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 6879 TASK_INTERRUPTIBLE); 6880 6881 if (list_empty(&ep->asocs)) { 6882 release_sock(sk); 6883 timeo = schedule_timeout(timeo); 6884 lock_sock(sk); 6885 } 6886 6887 err = -EINVAL; 6888 if (!sctp_sstate(sk, LISTENING)) 6889 break; 6890 6891 err = 0; 6892 if (!list_empty(&ep->asocs)) 6893 break; 6894 6895 err = sock_intr_errno(timeo); 6896 if (signal_pending(current)) 6897 break; 6898 6899 err = -EAGAIN; 6900 if (!timeo) 6901 break; 6902 } 6903 6904 finish_wait(sk_sleep(sk), &wait); 6905 6906 return err; 6907 } 6908 6909 static void sctp_wait_for_close(struct sock *sk, long timeout) 6910 { 6911 DEFINE_WAIT(wait); 6912 6913 do { 6914 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6915 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6916 break; 6917 release_sock(sk); 6918 timeout = schedule_timeout(timeout); 6919 lock_sock(sk); 6920 } while (!signal_pending(current) && timeout); 6921 6922 finish_wait(sk_sleep(sk), &wait); 6923 } 6924 6925 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6926 { 6927 struct sk_buff *frag; 6928 6929 if (!skb->data_len) 6930 goto done; 6931 6932 /* Don't forget the fragments. */ 6933 skb_walk_frags(skb, frag) 6934 sctp_skb_set_owner_r_frag(frag, sk); 6935 6936 done: 6937 sctp_skb_set_owner_r(skb, sk); 6938 } 6939 6940 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 6941 struct sctp_association *asoc) 6942 { 6943 struct inet_sock *inet = inet_sk(sk); 6944 struct inet_sock *newinet; 6945 6946 newsk->sk_type = sk->sk_type; 6947 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6948 newsk->sk_flags = sk->sk_flags; 6949 newsk->sk_no_check_tx = sk->sk_no_check_tx; 6950 newsk->sk_no_check_rx = sk->sk_no_check_rx; 6951 newsk->sk_reuse = sk->sk_reuse; 6952 6953 newsk->sk_shutdown = sk->sk_shutdown; 6954 newsk->sk_destruct = sctp_destruct_sock; 6955 newsk->sk_family = sk->sk_family; 6956 newsk->sk_protocol = IPPROTO_SCTP; 6957 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 6958 newsk->sk_sndbuf = sk->sk_sndbuf; 6959 newsk->sk_rcvbuf = sk->sk_rcvbuf; 6960 newsk->sk_lingertime = sk->sk_lingertime; 6961 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 6962 newsk->sk_sndtimeo = sk->sk_sndtimeo; 6963 6964 newinet = inet_sk(newsk); 6965 6966 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6967 * getsockname() and getpeername() 6968 */ 6969 newinet->inet_sport = inet->inet_sport; 6970 newinet->inet_saddr = inet->inet_saddr; 6971 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 6972 newinet->inet_dport = htons(asoc->peer.port); 6973 newinet->pmtudisc = inet->pmtudisc; 6974 newinet->inet_id = asoc->next_tsn ^ jiffies; 6975 6976 newinet->uc_ttl = inet->uc_ttl; 6977 newinet->mc_loop = 1; 6978 newinet->mc_ttl = 1; 6979 newinet->mc_index = 0; 6980 newinet->mc_list = NULL; 6981 } 6982 6983 /* Populate the fields of the newsk from the oldsk and migrate the assoc 6984 * and its messages to the newsk. 6985 */ 6986 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 6987 struct sctp_association *assoc, 6988 sctp_socket_type_t type) 6989 { 6990 struct sctp_sock *oldsp = sctp_sk(oldsk); 6991 struct sctp_sock *newsp = sctp_sk(newsk); 6992 struct sctp_bind_bucket *pp; /* hash list port iterator */ 6993 struct sctp_endpoint *newep = newsp->ep; 6994 struct sk_buff *skb, *tmp; 6995 struct sctp_ulpevent *event; 6996 struct sctp_bind_hashbucket *head; 6997 struct list_head tmplist; 6998 6999 /* Migrate socket buffer sizes and all the socket level options to the 7000 * new socket. 7001 */ 7002 newsk->sk_sndbuf = oldsk->sk_sndbuf; 7003 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 7004 /* Brute force copy old sctp opt. */ 7005 if (oldsp->do_auto_asconf) { 7006 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 7007 inet_sk_copy_descendant(newsk, oldsk); 7008 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 7009 } else 7010 inet_sk_copy_descendant(newsk, oldsk); 7011 7012 /* Restore the ep value that was overwritten with the above structure 7013 * copy. 7014 */ 7015 newsp->ep = newep; 7016 newsp->hmac = NULL; 7017 7018 /* Hook this new socket in to the bind_hash list. */ 7019 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7020 inet_sk(oldsk)->inet_num)]; 7021 local_bh_disable(); 7022 spin_lock(&head->lock); 7023 pp = sctp_sk(oldsk)->bind_hash; 7024 sk_add_bind_node(newsk, &pp->owner); 7025 sctp_sk(newsk)->bind_hash = pp; 7026 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7027 spin_unlock(&head->lock); 7028 local_bh_enable(); 7029 7030 /* Copy the bind_addr list from the original endpoint to the new 7031 * endpoint so that we can handle restarts properly 7032 */ 7033 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7034 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7035 7036 /* Move any messages in the old socket's receive queue that are for the 7037 * peeled off association to the new socket's receive queue. 7038 */ 7039 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7040 event = sctp_skb2event(skb); 7041 if (event->asoc == assoc) { 7042 __skb_unlink(skb, &oldsk->sk_receive_queue); 7043 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7044 sctp_skb_set_owner_r_frag(skb, newsk); 7045 } 7046 } 7047 7048 /* Clean up any messages pending delivery due to partial 7049 * delivery. Three cases: 7050 * 1) No partial deliver; no work. 7051 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7052 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7053 */ 7054 skb_queue_head_init(&newsp->pd_lobby); 7055 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7056 7057 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7058 struct sk_buff_head *queue; 7059 7060 /* Decide which queue to move pd_lobby skbs to. */ 7061 if (assoc->ulpq.pd_mode) { 7062 queue = &newsp->pd_lobby; 7063 } else 7064 queue = &newsk->sk_receive_queue; 7065 7066 /* Walk through the pd_lobby, looking for skbs that 7067 * need moved to the new socket. 7068 */ 7069 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7070 event = sctp_skb2event(skb); 7071 if (event->asoc == assoc) { 7072 __skb_unlink(skb, &oldsp->pd_lobby); 7073 __skb_queue_tail(queue, skb); 7074 sctp_skb_set_owner_r_frag(skb, newsk); 7075 } 7076 } 7077 7078 /* Clear up any skbs waiting for the partial 7079 * delivery to finish. 7080 */ 7081 if (assoc->ulpq.pd_mode) 7082 sctp_clear_pd(oldsk, NULL); 7083 7084 } 7085 7086 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7087 sctp_skb_set_owner_r_frag(skb, newsk); 7088 7089 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7090 sctp_skb_set_owner_r_frag(skb, newsk); 7091 7092 /* Set the type of socket to indicate that it is peeled off from the 7093 * original UDP-style socket or created with the accept() call on a 7094 * TCP-style socket.. 7095 */ 7096 newsp->type = type; 7097 7098 /* Mark the new socket "in-use" by the user so that any packets 7099 * that may arrive on the association after we've moved it are 7100 * queued to the backlog. This prevents a potential race between 7101 * backlog processing on the old socket and new-packet processing 7102 * on the new socket. 7103 * 7104 * The caller has just allocated newsk so we can guarantee that other 7105 * paths won't try to lock it and then oldsk. 7106 */ 7107 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7108 sctp_assoc_migrate(assoc, newsk); 7109 7110 /* If the association on the newsk is already closed before accept() 7111 * is called, set RCV_SHUTDOWN flag. 7112 */ 7113 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7114 newsk->sk_shutdown |= RCV_SHUTDOWN; 7115 7116 newsk->sk_state = SCTP_SS_ESTABLISHED; 7117 release_sock(newsk); 7118 } 7119 7120 7121 /* This proto struct describes the ULP interface for SCTP. */ 7122 struct proto sctp_prot = { 7123 .name = "SCTP", 7124 .owner = THIS_MODULE, 7125 .close = sctp_close, 7126 .connect = sctp_connect, 7127 .disconnect = sctp_disconnect, 7128 .accept = sctp_accept, 7129 .ioctl = sctp_ioctl, 7130 .init = sctp_init_sock, 7131 .destroy = sctp_destroy_sock, 7132 .shutdown = sctp_shutdown, 7133 .setsockopt = sctp_setsockopt, 7134 .getsockopt = sctp_getsockopt, 7135 .sendmsg = sctp_sendmsg, 7136 .recvmsg = sctp_recvmsg, 7137 .bind = sctp_bind, 7138 .backlog_rcv = sctp_backlog_rcv, 7139 .hash = sctp_hash, 7140 .unhash = sctp_unhash, 7141 .get_port = sctp_get_port, 7142 .obj_size = sizeof(struct sctp_sock), 7143 .sysctl_mem = sysctl_sctp_mem, 7144 .sysctl_rmem = sysctl_sctp_rmem, 7145 .sysctl_wmem = sysctl_sctp_wmem, 7146 .memory_pressure = &sctp_memory_pressure, 7147 .enter_memory_pressure = sctp_enter_memory_pressure, 7148 .memory_allocated = &sctp_memory_allocated, 7149 .sockets_allocated = &sctp_sockets_allocated, 7150 }; 7151 7152 #if IS_ENABLED(CONFIG_IPV6) 7153 7154 struct proto sctpv6_prot = { 7155 .name = "SCTPv6", 7156 .owner = THIS_MODULE, 7157 .close = sctp_close, 7158 .connect = sctp_connect, 7159 .disconnect = sctp_disconnect, 7160 .accept = sctp_accept, 7161 .ioctl = sctp_ioctl, 7162 .init = sctp_init_sock, 7163 .destroy = sctp_destroy_sock, 7164 .shutdown = sctp_shutdown, 7165 .setsockopt = sctp_setsockopt, 7166 .getsockopt = sctp_getsockopt, 7167 .sendmsg = sctp_sendmsg, 7168 .recvmsg = sctp_recvmsg, 7169 .bind = sctp_bind, 7170 .backlog_rcv = sctp_backlog_rcv, 7171 .hash = sctp_hash, 7172 .unhash = sctp_unhash, 7173 .get_port = sctp_get_port, 7174 .obj_size = sizeof(struct sctp6_sock), 7175 .sysctl_mem = sysctl_sctp_mem, 7176 .sysctl_rmem = sysctl_sctp_rmem, 7177 .sysctl_wmem = sysctl_sctp_wmem, 7178 .memory_pressure = &sctp_memory_pressure, 7179 .enter_memory_pressure = sctp_enter_memory_pressure, 7180 .memory_allocated = &sctp_memory_allocated, 7181 .sockets_allocated = &sctp_sockets_allocated, 7182 }; 7183 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7184