/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);

extern struct kmem_cache *sctp_bucket_cachep;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];

static int sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}
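/* Illustrative sketch (not part of the build): the per-chunk sndbuf charge
 * mirrors what sctp_set_owner_w() above applies and what sctp_wfree() later
 * has to undo.  Only the formula comes from the code above; the helper name
 * below is hypothetical and the struct sizes depend on the running kernel.
 *
 *	static size_t sndbuf_charge(size_t user_data_len)
 *	{
 *		// SCTP_DATA_SNDSIZE(chunk) is the user payload carried by the
 *		// DATA chunk; the two struct sizes cover the kernel overhead
 *		// of the skb and of the chunk descriptor itself.
 *		return user_data_len + sizeof(struct sk_buff) +
 *		       sizeof(struct sctp_chunk);
 *	}
 *
 * So a 1000-byte write that ends up in a single chunk consumes
 * 1000 + sizeof(struct sk_buff) + sizeof(struct sctp_chunk) bytes of the
 * association's sndbuf budget until the skb destructor releases it.
 */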
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_transport *transport;
	union sctp_addr *laddr = (union sctp_addr *)addr;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
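/* Illustrative userspace sketch for the bind() syntax documented above
 * (not part of this file; plain sockets API, no SCTP library required):
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *	int ret = bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 * A second bind() on the same descriptor is rejected (EINVAL) by sctp_bind()
 * above; additional local addresses are added through sctp_bindx() instead.
 */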
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		af->to_sk_saddr(addr, sk);
	}

	return ret;
}
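/* Illustrative sketch (not part of the build) of the port rule enforced in
 * sctp_do_bind() above: once the endpoint is bound, a later bind may pass
 * port 0 (and inherit the bound port) or the identical port, but never a
 * different one.  port_compatible() is a hypothetical helper used only for
 * this sketch.
 *
 *	static int port_compatible(unsigned short bound, unsigned short snum)
 *	{
 *		if (!bound)		// not yet bound: anything goes
 *			return 1;
 *		if (!snum)		// 0 means "inherit the bound port"
 *			return 1;
 *		return snum == bound;	// otherwise the ports must match
 *	}
 */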
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.  Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
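/* Illustrative sketch (not part of the build) of the "packed array" walk
 * used by sctp_bindx_add() above and by sctp_setsockopt_bindx() further
 * below: each entry's length is derived from its own sa_family, so IPv4 and
 * IPv6 addresses can be mixed back to back in a single buffer.
 *
 *	static void walk_packed_addrs(struct sockaddr *addrs, int addrcnt)
 *	{
 *		void *p = addrs;
 *		int i;
 *
 *		for (i = 0; i < addrcnt; i++) {
 *			struct sockaddr *sa = p;
 *			size_t len = (sa->sa_family == AF_INET6) ?
 *					sizeof(struct sockaddr_in6) :
 *					sizeof(struct sockaddr_in);
 *			// ... operate on sa ...
 *			p += len;
 *		}
 *	}
 *
 * The real code asks the per-AF ops (af->sockaddr_len) for the length
 * instead of hard-coding the two sizes.
 */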
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				dst_release(trans->dst);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}
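/* Worked example (illustrative only) of the cwnd reset applied above when
 * src_out_of_asoc_ok is set, i.e. the RFC 4960 initial cwnd rule
 * min(4*MTU, max(2*MTU, 4380)):
 *
 *	pathmtu = 1500:  min(6000, max(3000, 4380)) = 4380 bytes
 *	pathmtu =  512:  min(2048, max(1024, 4380)) = 2048 bytes
 *
 * Small-MTU paths are capped at 4*MTU, while common Ethernet-sized MTUs
 * start from the 4380-byte floor.
 */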
/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(). Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause an endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
 * from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
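/* Illustrative userspace sketch of the sctp_bindx() call whose kernel side
 * is sctp_setsockopt_bindx() above (not part of this file; assumes the
 * lksctp-tools library for sctp_bindx()):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sockaddr_in extra = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),	// same port the socket is bound to
 *	};
 *	inet_pton(AF_INET, "192.0.2.10", &extra.sin_addr);
 *
 *	// Add one more local address to the endpoint; SCTP_BINDX_REM_ADDR
 *	// would remove it again (but never the last remaining address).
 *	sctp_bindx(sd, (struct sockaddr *)&extra, 1, SCTP_BINDX_ADD_ADDR);
 *
 * The library packs the sockaddrs into one buffer and tunnels them through
 * setsockopt(), which lands in the function above.
 */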
1042 */ 1043 static int __sctp_connect(struct sock *sk, 1044 struct sockaddr *kaddrs, 1045 int addrs_size, 1046 sctp_assoc_t *assoc_id) 1047 { 1048 struct net *net = sock_net(sk); 1049 struct sctp_sock *sp; 1050 struct sctp_endpoint *ep; 1051 struct sctp_association *asoc = NULL; 1052 struct sctp_association *asoc2; 1053 struct sctp_transport *transport; 1054 union sctp_addr to; 1055 struct sctp_af *af; 1056 sctp_scope_t scope; 1057 long timeo; 1058 int err = 0; 1059 int addrcnt = 0; 1060 int walk_size = 0; 1061 union sctp_addr *sa_addr = NULL; 1062 void *addr_buf; 1063 unsigned short port; 1064 unsigned int f_flags = 0; 1065 1066 sp = sctp_sk(sk); 1067 ep = sp->ep; 1068 1069 /* connect() cannot be done on a socket that is already in ESTABLISHED 1070 * state - UDP-style peeled off socket or a TCP-style socket that 1071 * is already connected. 1072 * It cannot be done even on a TCP-style listening socket. 1073 */ 1074 if (sctp_sstate(sk, ESTABLISHED) || 1075 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { 1076 err = -EISCONN; 1077 goto out_free; 1078 } 1079 1080 /* Walk through the addrs buffer and count the number of addresses. */ 1081 addr_buf = kaddrs; 1082 while (walk_size < addrs_size) { 1083 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1084 err = -EINVAL; 1085 goto out_free; 1086 } 1087 1088 sa_addr = addr_buf; 1089 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1090 1091 /* If the address family is not supported or if this address 1092 * causes the address buffer to overflow return EINVAL. 1093 */ 1094 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1095 err = -EINVAL; 1096 goto out_free; 1097 } 1098 1099 port = ntohs(sa_addr->v4.sin_port); 1100 1101 /* Save current address so we can work with it */ 1102 memcpy(&to, sa_addr, af->sockaddr_len); 1103 1104 err = sctp_verify_addr(sk, &to, af->sockaddr_len); 1105 if (err) 1106 goto out_free; 1107 1108 /* Make sure the destination port is correctly set 1109 * in all addresses. 1110 */ 1111 if (asoc && asoc->peer.port && asoc->peer.port != port) { 1112 err = -EINVAL; 1113 goto out_free; 1114 } 1115 1116 /* Check if there already is a matching association on the 1117 * endpoint (other than the one created here). 1118 */ 1119 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); 1120 if (asoc2 && asoc2 != asoc) { 1121 if (asoc2->state >= SCTP_STATE_ESTABLISHED) 1122 err = -EISCONN; 1123 else 1124 err = -EALREADY; 1125 goto out_free; 1126 } 1127 1128 /* If we could not find a matching association on the endpoint, 1129 * make sure that there is no peeled-off association matching 1130 * the peer address even on another socket. 1131 */ 1132 if (sctp_endpoint_is_peeled_off(ep, &to)) { 1133 err = -EADDRNOTAVAIL; 1134 goto out_free; 1135 } 1136 1137 if (!asoc) { 1138 /* If a bind() or sctp_bindx() is not called prior to 1139 * an sctp_connectx() call, the system picks an 1140 * ephemeral port and will choose an address set 1141 * equivalent to binding with a wildcard address. 1142 */ 1143 if (!ep->base.bind_addr.port) { 1144 if (sctp_autobind(sk)) { 1145 err = -EAGAIN; 1146 goto out_free; 1147 } 1148 } else { 1149 /* 1150 * If an unprivileged user inherits a 1-many 1151 * style socket with open associations on a 1152 * privileged port, it MAY be permitted to 1153 * accept new associations, but it SHOULD NOT 1154 * be permitted to open new associations. 
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !ns_capable(net->user_ns,
				    CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	af = sctp_get_af_specific(sa_addr->sa.sa_family);
	af->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);
	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
		*assoc_id = asoc->assoc_id;

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case. It's a
		 * no-op if it wasn't hashed, so we're safe.
		 */
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association. On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code. The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed. Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached. The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent. This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association. It does not necessarily equal the set of addresses
 * the peer uses for the resulting association. If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx(). This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface. It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API. Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call. Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only difference is that
 * we store the actual length of the address buffer into the addrs_num
 * structure member. That way we can re-use the existing code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
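/* Illustrative userspace sketch of the sctp_connectx() call serviced by the
 * setsockopt/getsockopt handlers above (not part of this file; assumes the
 * lksctp-tools library):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sockaddr_in peers[2];
 *	sctp_assoc_t id;
 *
 *	// ... fill in two addresses of the same multi-homed peer, both
 *	// carrying the same destination port ...
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &id) == 0)
 *		// id now holds the association id fed back through
 *		// sctp_getsockopt_connectx3() above
 *		;
 *
 * With two sockaddr_in entries the packed array is simply the C array,
 * since both entries have the same length.
 */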
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 */
	local_bh_disable();
	bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	local_bh_enable();

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	int err;
	sctp_scope_t scope;
	long timeo;
	__u16 sinfo_flags = 0;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinfo = cmsgs.info;
	sinit = cmsgs.init;

	/* Did the user specify SNDRCVINFO?  */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
1805 */ 1806 if (ep->base.bind_addr.port < PROT_SOCK && 1807 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1808 err = -EACCES; 1809 goto out_unlock; 1810 } 1811 } 1812 1813 scope = sctp_scope(&to); 1814 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1815 if (!new_asoc) { 1816 err = -ENOMEM; 1817 goto out_unlock; 1818 } 1819 asoc = new_asoc; 1820 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1821 if (err < 0) { 1822 err = -ENOMEM; 1823 goto out_free; 1824 } 1825 1826 /* If the SCTP_INIT ancillary data is specified, set all 1827 * the association init values accordingly. 1828 */ 1829 if (sinit) { 1830 if (sinit->sinit_num_ostreams) { 1831 asoc->c.sinit_num_ostreams = 1832 sinit->sinit_num_ostreams; 1833 } 1834 if (sinit->sinit_max_instreams) { 1835 asoc->c.sinit_max_instreams = 1836 sinit->sinit_max_instreams; 1837 } 1838 if (sinit->sinit_max_attempts) { 1839 asoc->max_init_attempts 1840 = sinit->sinit_max_attempts; 1841 } 1842 if (sinit->sinit_max_init_timeo) { 1843 asoc->max_init_timeo = 1844 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1845 } 1846 } 1847 1848 /* Prime the peer's transport structures. */ 1849 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1850 if (!transport) { 1851 err = -ENOMEM; 1852 goto out_free; 1853 } 1854 } 1855 1856 /* ASSERT: we have a valid association at this point. */ 1857 pr_debug("%s: we have a valid association\n", __func__); 1858 1859 if (!sinfo) { 1860 /* If the user didn't specify SNDRCVINFO, make up one with 1861 * some defaults. 1862 */ 1863 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1864 default_sinfo.sinfo_stream = asoc->default_stream; 1865 default_sinfo.sinfo_flags = asoc->default_flags; 1866 default_sinfo.sinfo_ppid = asoc->default_ppid; 1867 default_sinfo.sinfo_context = asoc->default_context; 1868 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1869 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1870 sinfo = &default_sinfo; 1871 } 1872 1873 /* API 7.1.7, the sndbuf size per association bounds the 1874 * maximum size of data that can be sent in a single send call. 1875 */ 1876 if (msg_len > sk->sk_sndbuf) { 1877 err = -EMSGSIZE; 1878 goto out_free; 1879 } 1880 1881 if (asoc->pmtu_pending) 1882 sctp_assoc_pending_pmtu(sk, asoc); 1883 1884 /* If fragmentation is disabled and the message length exceeds the 1885 * association fragmentation point, return EMSGSIZE. The I-D 1886 * does not specify what this error is, but this looks like 1887 * a great fit. 1888 */ 1889 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1890 err = -EMSGSIZE; 1891 goto out_free; 1892 } 1893 1894 /* Check for invalid stream. */ 1895 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1896 err = -EINVAL; 1897 goto out_free; 1898 } 1899 1900 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1901 if (!sctp_wspace(asoc)) { 1902 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); 1903 if (err) 1904 goto out_free; 1905 } 1906 1907 /* If an address is passed with the sendto/sendmsg call, it is used 1908 * to override the primary destination address in the TCP model, or 1909 * when SCTP_ADDR_OVER flag is set in the UDP model. 1910 */ 1911 if ((sctp_style(sk, TCP) && msg_name) || 1912 (sinfo_flags & SCTP_ADDR_OVER)) { 1913 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 1914 if (!chunk_tp) { 1915 err = -EINVAL; 1916 goto out_free; 1917 } 1918 } else 1919 chunk_tp = NULL; 1920 1921 /* Auto-connect, if we aren't connected already. 
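 *
 * (Editor's note: on a one-to-many socket this is the path taken when
 * sendmsg() is issued towards a peer before any explicit connect(); the
 * ASSOCIATE primitive below starts the INIT handshake and the user data is
 * queued until the association is established.)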
*/ 1922 if (sctp_state(asoc, CLOSED)) { 1923 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 1924 if (err < 0) 1925 goto out_free; 1926 1927 pr_debug("%s: we associated primitively\n", __func__); 1928 } 1929 1930 /* Break the message into multiple chunks of maximum size. */ 1931 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1932 if (IS_ERR(datamsg)) { 1933 err = PTR_ERR(datamsg); 1934 goto out_free; 1935 } 1936 1937 /* Now send the (possibly) fragmented message. */ 1938 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1939 sctp_chunk_hold(chunk); 1940 1941 /* Do accounting for the write space. */ 1942 sctp_set_owner_w(chunk); 1943 1944 chunk->transport = chunk_tp; 1945 } 1946 1947 /* Send it to the lower layers. Note: all chunks 1948 * must either fail or succeed. The lower layer 1949 * works that way today. Keep it that way or this 1950 * breaks. 1951 */ 1952 err = sctp_primitive_SEND(net, asoc, datamsg); 1953 /* Did the lower layer accept the chunk? */ 1954 if (err) { 1955 sctp_datamsg_free(datamsg); 1956 goto out_free; 1957 } 1958 1959 pr_debug("%s: we sent primitively\n", __func__); 1960 1961 sctp_datamsg_put(datamsg); 1962 err = msg_len; 1963 1964 /* If we are already past ASSOCIATE, the lower 1965 * layers are responsible for association cleanup. 1966 */ 1967 goto out_unlock; 1968 1969 out_free: 1970 if (new_asoc) { 1971 sctp_unhash_established(asoc); 1972 sctp_association_free(asoc); 1973 } 1974 out_unlock: 1975 release_sock(sk); 1976 1977 out_nounlock: 1978 return sctp_error(sk, msg_flags, err); 1979 1980 #if 0 1981 do_sock_err: 1982 if (msg_len) 1983 err = msg_len; 1984 else 1985 err = sock_error(sk); 1986 goto out; 1987 1988 do_interrupted: 1989 if (msg_len) 1990 err = msg_len; 1991 goto out; 1992 #endif /* 0 */ 1993 } 1994 1995 /* This is an extended version of skb_pull() that removes the data from the 1996 * start of a skb even when data is spread across the list of skb's in the 1997 * frag_list. len specifies the total amount of data that needs to be removed. 1998 * when 'len' bytes could be removed from the skb, it returns 0. 1999 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2000 * could not be removed. 2001 */ 2002 static int sctp_skb_pull(struct sk_buff *skb, int len) 2003 { 2004 struct sk_buff *list; 2005 int skb_len = skb_headlen(skb); 2006 int rlen; 2007 2008 if (len <= skb_len) { 2009 __skb_pull(skb, len); 2010 return 0; 2011 } 2012 len -= skb_len; 2013 __skb_pull(skb, skb_len); 2014 2015 skb_walk_frags(skb, list) { 2016 rlen = sctp_skb_pull(list, len); 2017 skb->len -= (len-rlen); 2018 skb->data_len -= (len-rlen); 2019 2020 if (!rlen) 2021 return 0; 2022 2023 len = rlen; 2024 } 2025 2026 return len; 2027 } 2028 2029 /* API 3.1.3 recvmsg() - UDP Style Syntax 2030 * 2031 * ssize_t recvmsg(int socket, struct msghdr *message, 2032 * int flags); 2033 * 2034 * socket - the socket descriptor of the endpoint. 2035 * message - pointer to the msghdr structure which contains a single 2036 * user message and possibly some ancillary data. 2037 * 2038 * See Section 5 for complete description of the data 2039 * structures. 2040 * 2041 * flags - flags sent or received with the user message, see Section 2042 * 5 for complete description of the flags. 
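 *
 * Illustrative user-space sketch (editor's example; sd is assumed to be an
 * open SCTP socket and handle_notification() is a hypothetical helper):
 *
 *	char buf[8192];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct sockaddr_storage from;
 *	struct msghdr mh;
 *	ssize_t n;
 *
 *	memset(&mh, 0, sizeof(mh));
 *	mh.msg_name = &from;
 *	mh.msg_namelen = sizeof(from);
 *	mh.msg_iov = &iov;
 *	mh.msg_iovlen = 1;
 *	n = recvmsg(sd, &mh, 0);
 *	if (n > 0 && (mh.msg_flags & MSG_NOTIFICATION))
 *		handle_notification(buf, n);
 *	else if (n > 0 && !(mh.msg_flags & MSG_EOR))
 *		;	(only part of the message arrived; read again for the rest)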
2043 */ 2044 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 2045 2046 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, 2047 struct msghdr *msg, size_t len, int noblock, 2048 int flags, int *addr_len) 2049 { 2050 struct sctp_ulpevent *event = NULL; 2051 struct sctp_sock *sp = sctp_sk(sk); 2052 struct sk_buff *skb; 2053 int copied; 2054 int err = 0; 2055 int skb_len; 2056 2057 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2058 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2059 addr_len); 2060 2061 lock_sock(sk); 2062 2063 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { 2064 err = -ENOTCONN; 2065 goto out; 2066 } 2067 2068 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2069 if (!skb) 2070 goto out; 2071 2072 /* Get the total length of the skb including any skb's in the 2073 * frag_list. 2074 */ 2075 skb_len = skb->len; 2076 2077 copied = skb_len; 2078 if (copied > len) 2079 copied = len; 2080 2081 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 2082 2083 event = sctp_skb2event(skb); 2084 2085 if (err) 2086 goto out_free; 2087 2088 sock_recv_ts_and_drops(msg, sk, skb); 2089 if (sctp_ulpevent_is_notification(event)) { 2090 msg->msg_flags |= MSG_NOTIFICATION; 2091 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2092 } else { 2093 sp->pf->skb_msgname(skb, msg->msg_name, addr_len); 2094 } 2095 2096 /* Check if we allow SCTP_SNDRCVINFO. */ 2097 if (sp->subscribe.sctp_data_io_event) 2098 sctp_ulpevent_read_sndrcvinfo(event, msg); 2099 #if 0 2100 /* FIXME: we should be calling IP/IPv6 layers. */ 2101 if (sk->sk_protinfo.af_inet.cmsg_flags) 2102 ip_cmsg_recv(msg, skb); 2103 #endif 2104 2105 err = copied; 2106 2107 /* If skb's length exceeds the user's buffer, update the skb and 2108 * push it back to the receive_queue so that the next call to 2109 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2110 */ 2111 if (skb_len > copied) { 2112 msg->msg_flags &= ~MSG_EOR; 2113 if (flags & MSG_PEEK) 2114 goto out_free; 2115 sctp_skb_pull(skb, copied); 2116 skb_queue_head(&sk->sk_receive_queue, skb); 2117 2118 /* When only partial message is copied to the user, increase 2119 * rwnd by that amount. If all the data in the skb is read, 2120 * rwnd is updated when the event is freed. 2121 */ 2122 if (!sctp_ulpevent_is_notification(event)) 2123 sctp_assoc_rwnd_increase(event->asoc, copied); 2124 goto out; 2125 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2126 (event->msg_flags & MSG_EOR)) 2127 msg->msg_flags |= MSG_EOR; 2128 else 2129 msg->msg_flags &= ~MSG_EOR; 2130 2131 out_free: 2132 if (flags & MSG_PEEK) { 2133 /* Release the skb reference acquired after peeking the skb in 2134 * sctp_skb_recv_datagram(). 2135 */ 2136 kfree_skb(skb); 2137 } else { 2138 /* Free the event which includes releasing the reference to 2139 * the owner of the skb, freeing the skb and updating the 2140 * rwnd. 2141 */ 2142 sctp_ulpevent_free(event); 2143 } 2144 out: 2145 release_sock(sk); 2146 return err; 2147 } 2148 2149 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2150 * 2151 * This option is a on/off flag. If enabled no SCTP message 2152 * fragmentation will be performed. Instead if a message being sent 2153 * exceeds the current PMTU size, the message will NOT be sent and 2154 * instead a error will be indicated to the user. 
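 *
 * Illustrative sketch (editor's example; sd is assumed to be an SCTP
 * socket descriptor):
 *
 *	int on = 1;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS, &on, sizeof(on));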
2155 */ 2156 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2157 char __user *optval, 2158 unsigned int optlen) 2159 { 2160 int val; 2161 2162 if (optlen < sizeof(int)) 2163 return -EINVAL; 2164 2165 if (get_user(val, (int __user *)optval)) 2166 return -EFAULT; 2167 2168 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2169 2170 return 0; 2171 } 2172 2173 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2174 unsigned int optlen) 2175 { 2176 struct sctp_association *asoc; 2177 struct sctp_ulpevent *event; 2178 2179 if (optlen > sizeof(struct sctp_event_subscribe)) 2180 return -EINVAL; 2181 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2182 return -EFAULT; 2183 2184 /* 2185 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2186 * if there is no data to be sent or retransmit, the stack will 2187 * immediately send up this notification. 2188 */ 2189 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2190 &sctp_sk(sk)->subscribe)) { 2191 asoc = sctp_id2assoc(sk, 0); 2192 2193 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2194 event = sctp_ulpevent_make_sender_dry_event(asoc, 2195 GFP_ATOMIC); 2196 if (!event) 2197 return -ENOMEM; 2198 2199 sctp_ulpq_tail_event(&asoc->ulpq, event); 2200 } 2201 } 2202 2203 return 0; 2204 } 2205 2206 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2207 * 2208 * This socket option is applicable to the UDP-style socket only. When 2209 * set it will cause associations that are idle for more than the 2210 * specified number of seconds to automatically close. An association 2211 * being idle is defined an association that has NOT sent or received 2212 * user data. The special value of '0' indicates that no automatic 2213 * close of any associations should be performed. The option expects an 2214 * integer defining the number of seconds of idle time before an 2215 * association is closed. 2216 */ 2217 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2218 unsigned int optlen) 2219 { 2220 struct sctp_sock *sp = sctp_sk(sk); 2221 struct net *net = sock_net(sk); 2222 2223 /* Applicable to UDP-style socket only */ 2224 if (sctp_style(sk, TCP)) 2225 return -EOPNOTSUPP; 2226 if (optlen != sizeof(int)) 2227 return -EINVAL; 2228 if (copy_from_user(&sp->autoclose, optval, optlen)) 2229 return -EFAULT; 2230 2231 if (sp->autoclose > net->sctp.max_autoclose) 2232 sp->autoclose = net->sctp.max_autoclose; 2233 2234 return 0; 2235 } 2236 2237 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2238 * 2239 * Applications can enable or disable heartbeats for any peer address of 2240 * an association, modify an address's heartbeat interval, force a 2241 * heartbeat to be sent immediately, and adjust the address's maximum 2242 * number of retransmissions sent before an address is considered 2243 * unreachable. The following structure is used to access and modify an 2244 * address's parameters: 2245 * 2246 * struct sctp_paddrparams { 2247 * sctp_assoc_t spp_assoc_id; 2248 * struct sockaddr_storage spp_address; 2249 * uint32_t spp_hbinterval; 2250 * uint16_t spp_pathmaxrxt; 2251 * uint32_t spp_pathmtu; 2252 * uint32_t spp_sackdelay; 2253 * uint32_t spp_flags; 2254 * }; 2255 * 2256 * spp_assoc_id - (one-to-many style socket) This is filled in the 2257 * application, and identifies the association for 2258 * this query. 2259 * spp_address - This specifies which address is of interest. 
2260 * spp_hbinterval - This contains the value of the heartbeat interval, 2261 * in milliseconds. If a value of zero 2262 * is present in this field then no changes are to 2263 * be made to this parameter. 2264 * spp_pathmaxrxt - This contains the maximum number of 2265 * retransmissions before this address shall be 2266 * considered unreachable. If a value of zero 2267 * is present in this field then no changes are to 2268 * be made to this parameter. 2269 * spp_pathmtu - When Path MTU discovery is disabled the value 2270 * specified here will be the "fixed" path mtu. 2271 * Note that if the spp_address field is empty 2272 * then all associations on this address will 2273 * have this fixed path mtu set upon them. 2274 * 2275 * spp_sackdelay - When delayed sack is enabled, this value specifies 2276 * the number of milliseconds that sacks will be delayed 2277 * for. This value will apply to all addresses of an 2278 * association if the spp_address field is empty. Note 2279 * also that if delayed sack is enabled and this 2280 * value is set to 0, no change is made to the last 2281 * recorded delayed sack timer value. 2282 * 2283 * spp_flags - These flags are used to control various features 2284 * on an association. The flag field may contain 2285 * zero or more of the following options. 2286 * 2287 * SPP_HB_ENABLE - Enable heartbeats on the 2288 * specified address. Note that if the address 2289 * field is empty all addresses for the association 2290 * have heartbeats enabled upon them. 2291 * 2292 * SPP_HB_DISABLE - Disable heartbeats on the 2293 * specified address. Note that if the address 2294 * field is empty all addresses for the association 2295 * will have their heartbeats disabled. Note also 2296 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2297 * mutually exclusive; only one of these two should 2298 * be specified. Enabling both fields will have 2299 * undetermined results. 2300 * 2301 * SPP_HB_DEMAND - Request a user initiated heartbeat 2302 * to be made immediately. 2303 * 2304 * SPP_HB_TIME_IS_ZERO - Specifies that the time for 2305 * heartbeat delay is to be set to the value of 0 2306 * milliseconds. 2307 * 2308 * SPP_PMTUD_ENABLE - This field will enable PMTU 2309 * discovery upon the specified address. Note that 2310 * if the address field is empty then all addresses 2311 * on the association are affected. 2312 * 2313 * SPP_PMTUD_DISABLE - This field will disable PMTU 2314 * discovery upon the specified address. Note that 2315 * if the address field is empty then all addresses 2316 * on the association are affected. Note also that 2317 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2318 * exclusive. Enabling both will have undetermined 2319 * results. 2320 * 2321 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2322 * on delayed sack. The time specified in spp_sackdelay 2323 * is used to specify the sack delay for this address. Note 2324 * that if spp_address is empty then all addresses will 2325 * enable delayed sack and take on the sack delay 2326 * value specified in spp_sackdelay. 2327 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2328 * off delayed sack. If the spp_address field is blank then 2329 * delayed sack is disabled for the entire association. Note 2330 * also that this field is mutually exclusive with 2331 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2332 * results.
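 *
 * Illustrative user-space sketch (editor's example; sd and the association
 * id are assumptions, e.g. the id could come from an SCTP_ASSOC_CHANGE
 * notification): enable heartbeats every 5 seconds on all addresses of one
 * association:
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));		(empty spp_address = all addresses)
 *	pp.spp_assoc_id = id;
 *	pp.spp_flags = SPP_HB_ENABLE;
 *	pp.spp_hbinterval = 5000;		(milliseconds)
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));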
2333 */ 2334 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2335 struct sctp_transport *trans, 2336 struct sctp_association *asoc, 2337 struct sctp_sock *sp, 2338 int hb_change, 2339 int pmtud_change, 2340 int sackdelay_change) 2341 { 2342 int error; 2343 2344 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2345 struct net *net = sock_net(trans->asoc->base.sk); 2346 2347 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2348 if (error) 2349 return error; 2350 } 2351 2352 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2353 * this field is ignored. Note also that a value of zero indicates 2354 * the current setting should be left unchanged. 2355 */ 2356 if (params->spp_flags & SPP_HB_ENABLE) { 2357 2358 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2359 * set. This lets us use 0 value when this flag 2360 * is set. 2361 */ 2362 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2363 params->spp_hbinterval = 0; 2364 2365 if (params->spp_hbinterval || 2366 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2367 if (trans) { 2368 trans->hbinterval = 2369 msecs_to_jiffies(params->spp_hbinterval); 2370 } else if (asoc) { 2371 asoc->hbinterval = 2372 msecs_to_jiffies(params->spp_hbinterval); 2373 } else { 2374 sp->hbinterval = params->spp_hbinterval; 2375 } 2376 } 2377 } 2378 2379 if (hb_change) { 2380 if (trans) { 2381 trans->param_flags = 2382 (trans->param_flags & ~SPP_HB) | hb_change; 2383 } else if (asoc) { 2384 asoc->param_flags = 2385 (asoc->param_flags & ~SPP_HB) | hb_change; 2386 } else { 2387 sp->param_flags = 2388 (sp->param_flags & ~SPP_HB) | hb_change; 2389 } 2390 } 2391 2392 /* When Path MTU discovery is disabled the value specified here will 2393 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2394 * include the flag SPP_PMTUD_DISABLE for this field to have any 2395 * effect). 2396 */ 2397 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2398 if (trans) { 2399 trans->pathmtu = params->spp_pathmtu; 2400 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2401 } else if (asoc) { 2402 asoc->pathmtu = params->spp_pathmtu; 2403 sctp_frag_point(asoc, params->spp_pathmtu); 2404 } else { 2405 sp->pathmtu = params->spp_pathmtu; 2406 } 2407 } 2408 2409 if (pmtud_change) { 2410 if (trans) { 2411 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2412 (params->spp_flags & SPP_PMTUD_ENABLE); 2413 trans->param_flags = 2414 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2415 if (update) { 2416 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2417 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2418 } 2419 } else if (asoc) { 2420 asoc->param_flags = 2421 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2422 } else { 2423 sp->param_flags = 2424 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2425 } 2426 } 2427 2428 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2429 * value of this field is ignored. Note also that a value of zero 2430 * indicates the current setting should be left unchanged. 
2431 */ 2432 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2433 if (trans) { 2434 trans->sackdelay = 2435 msecs_to_jiffies(params->spp_sackdelay); 2436 } else if (asoc) { 2437 asoc->sackdelay = 2438 msecs_to_jiffies(params->spp_sackdelay); 2439 } else { 2440 sp->sackdelay = params->spp_sackdelay; 2441 } 2442 } 2443 2444 if (sackdelay_change) { 2445 if (trans) { 2446 trans->param_flags = 2447 (trans->param_flags & ~SPP_SACKDELAY) | 2448 sackdelay_change; 2449 } else if (asoc) { 2450 asoc->param_flags = 2451 (asoc->param_flags & ~SPP_SACKDELAY) | 2452 sackdelay_change; 2453 } else { 2454 sp->param_flags = 2455 (sp->param_flags & ~SPP_SACKDELAY) | 2456 sackdelay_change; 2457 } 2458 } 2459 2460 /* Note that a value of zero indicates the current setting should be 2461 left unchanged. 2462 */ 2463 if (params->spp_pathmaxrxt) { 2464 if (trans) { 2465 trans->pathmaxrxt = params->spp_pathmaxrxt; 2466 } else if (asoc) { 2467 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2468 } else { 2469 sp->pathmaxrxt = params->spp_pathmaxrxt; 2470 } 2471 } 2472 2473 return 0; 2474 } 2475 2476 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2477 char __user *optval, 2478 unsigned int optlen) 2479 { 2480 struct sctp_paddrparams params; 2481 struct sctp_transport *trans = NULL; 2482 struct sctp_association *asoc = NULL; 2483 struct sctp_sock *sp = sctp_sk(sk); 2484 int error; 2485 int hb_change, pmtud_change, sackdelay_change; 2486 2487 if (optlen != sizeof(struct sctp_paddrparams)) 2488 return -EINVAL; 2489 2490 if (copy_from_user(¶ms, optval, optlen)) 2491 return -EFAULT; 2492 2493 /* Validate flags and value parameters. */ 2494 hb_change = params.spp_flags & SPP_HB; 2495 pmtud_change = params.spp_flags & SPP_PMTUD; 2496 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2497 2498 if (hb_change == SPP_HB || 2499 pmtud_change == SPP_PMTUD || 2500 sackdelay_change == SPP_SACKDELAY || 2501 params.spp_sackdelay > 500 || 2502 (params.spp_pathmtu && 2503 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2504 return -EINVAL; 2505 2506 /* If an address other than INADDR_ANY is specified, and 2507 * no transport is found, then the request is invalid. 2508 */ 2509 if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { 2510 trans = sctp_addr_id2transport(sk, ¶ms.spp_address, 2511 params.spp_assoc_id); 2512 if (!trans) 2513 return -EINVAL; 2514 } 2515 2516 /* Get association, if assoc_id != 0 and the socket is a one 2517 * to many style socket, and an association was not found, then 2518 * the id was invalid. 2519 */ 2520 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2521 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2522 return -EINVAL; 2523 2524 /* Heartbeat demand can only be sent on a transport or 2525 * association, but not a socket. 2526 */ 2527 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2528 return -EINVAL; 2529 2530 /* Process parameters. */ 2531 error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2532 hb_change, pmtud_change, 2533 sackdelay_change); 2534 2535 if (error) 2536 return error; 2537 2538 /* If changes are for association, also apply parameters to each 2539 * transport. 
2540 */ 2541 if (!trans && asoc) { 2542 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2543 transports) { 2544 sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, 2545 hb_change, pmtud_change, 2546 sackdelay_change); 2547 } 2548 } 2549 2550 return 0; 2551 } 2552 2553 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2554 { 2555 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2556 } 2557 2558 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2559 { 2560 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2561 } 2562 2563 /* 2564 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2565 * 2566 * This option will effect the way delayed acks are performed. This 2567 * option allows you to get or set the delayed ack time, in 2568 * milliseconds. It also allows changing the delayed ack frequency. 2569 * Changing the frequency to 1 disables the delayed sack algorithm. If 2570 * the assoc_id is 0, then this sets or gets the endpoints default 2571 * values. If the assoc_id field is non-zero, then the set or get 2572 * effects the specified association for the one to many model (the 2573 * assoc_id field is ignored by the one to one model). Note that if 2574 * sack_delay or sack_freq are 0 when setting this option, then the 2575 * current values will remain unchanged. 2576 * 2577 * struct sctp_sack_info { 2578 * sctp_assoc_t sack_assoc_id; 2579 * uint32_t sack_delay; 2580 * uint32_t sack_freq; 2581 * }; 2582 * 2583 * sack_assoc_id - This parameter, indicates which association the user 2584 * is performing an action upon. Note that if this field's value is 2585 * zero then the endpoints default value is changed (effecting future 2586 * associations only). 2587 * 2588 * sack_delay - This parameter contains the number of milliseconds that 2589 * the user is requesting the delayed ACK timer be set to. Note that 2590 * this value is defined in the standard to be between 200 and 500 2591 * milliseconds. 2592 * 2593 * sack_freq - This parameter contains the number of packets that must 2594 * be received before a sack is sent without waiting for the delay 2595 * timer to expire. The default value for this is 2, setting this 2596 * value to 1 will disable the delayed sack algorithm. 2597 */ 2598 2599 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2600 char __user *optval, unsigned int optlen) 2601 { 2602 struct sctp_sack_info params; 2603 struct sctp_transport *trans = NULL; 2604 struct sctp_association *asoc = NULL; 2605 struct sctp_sock *sp = sctp_sk(sk); 2606 2607 if (optlen == sizeof(struct sctp_sack_info)) { 2608 if (copy_from_user(¶ms, optval, optlen)) 2609 return -EFAULT; 2610 2611 if (params.sack_delay == 0 && params.sack_freq == 0) 2612 return 0; 2613 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2614 pr_warn_ratelimited(DEPRECATED 2615 "%s (pid %d) " 2616 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2617 "Use struct sctp_sack_info instead\n", 2618 current->comm, task_pid_nr(current)); 2619 if (copy_from_user(¶ms, optval, optlen)) 2620 return -EFAULT; 2621 2622 if (params.sack_delay == 0) 2623 params.sack_freq = 1; 2624 else 2625 params.sack_freq = 0; 2626 } else 2627 return -EINVAL; 2628 2629 /* Validate value parameter. */ 2630 if (params.sack_delay > 500) 2631 return -EINVAL; 2632 2633 /* Get association, if sack_assoc_id != 0 and the socket is a one 2634 * to many style socket, and an association was not found, then 2635 * the id was invalid. 
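 *
 * (Editor's illustration, not from the API text above: a well-formed
 * request against the endpoint defaults could look like
 *
 *	struct sctp_sack_info si = { .sack_assoc_id = 0,
 *				     .sack_delay = 200,
 *				     .sack_freq = 2 };
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 *
 * where sd is assumed to be a one-to-many SCTP socket.)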
2636 */ 2637 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2638 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2639 return -EINVAL; 2640 2641 if (params.sack_delay) { 2642 if (asoc) { 2643 asoc->sackdelay = 2644 msecs_to_jiffies(params.sack_delay); 2645 asoc->param_flags = 2646 sctp_spp_sackdelay_enable(asoc->param_flags); 2647 } else { 2648 sp->sackdelay = params.sack_delay; 2649 sp->param_flags = 2650 sctp_spp_sackdelay_enable(sp->param_flags); 2651 } 2652 } 2653 2654 if (params.sack_freq == 1) { 2655 if (asoc) { 2656 asoc->param_flags = 2657 sctp_spp_sackdelay_disable(asoc->param_flags); 2658 } else { 2659 sp->param_flags = 2660 sctp_spp_sackdelay_disable(sp->param_flags); 2661 } 2662 } else if (params.sack_freq > 1) { 2663 if (asoc) { 2664 asoc->sackfreq = params.sack_freq; 2665 asoc->param_flags = 2666 sctp_spp_sackdelay_enable(asoc->param_flags); 2667 } else { 2668 sp->sackfreq = params.sack_freq; 2669 sp->param_flags = 2670 sctp_spp_sackdelay_enable(sp->param_flags); 2671 } 2672 } 2673 2674 /* If change is for association, also apply to each transport. */ 2675 if (asoc) { 2676 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2677 transports) { 2678 if (params.sack_delay) { 2679 trans->sackdelay = 2680 msecs_to_jiffies(params.sack_delay); 2681 trans->param_flags = 2682 sctp_spp_sackdelay_enable(trans->param_flags); 2683 } 2684 if (params.sack_freq == 1) { 2685 trans->param_flags = 2686 sctp_spp_sackdelay_disable(trans->param_flags); 2687 } else if (params.sack_freq > 1) { 2688 trans->sackfreq = params.sack_freq; 2689 trans->param_flags = 2690 sctp_spp_sackdelay_enable(trans->param_flags); 2691 } 2692 } 2693 } 2694 2695 return 0; 2696 } 2697 2698 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2699 * 2700 * Applications can specify protocol parameters for the default association 2701 * initialization. The option name argument to setsockopt() and getsockopt() 2702 * is SCTP_INITMSG. 2703 * 2704 * Setting initialization parameters is effective only on an unconnected 2705 * socket (for UDP-style sockets only future associations are effected 2706 * by the change). With TCP-style sockets, this option is inherited by 2707 * sockets derived from a listener socket. 2708 */ 2709 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2710 { 2711 struct sctp_initmsg sinit; 2712 struct sctp_sock *sp = sctp_sk(sk); 2713 2714 if (optlen != sizeof(struct sctp_initmsg)) 2715 return -EINVAL; 2716 if (copy_from_user(&sinit, optval, optlen)) 2717 return -EFAULT; 2718 2719 if (sinit.sinit_num_ostreams) 2720 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2721 if (sinit.sinit_max_instreams) 2722 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2723 if (sinit.sinit_max_attempts) 2724 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2725 if (sinit.sinit_max_init_timeo) 2726 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2727 2728 return 0; 2729 } 2730 2731 /* 2732 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2733 * 2734 * Applications that wish to use the sendto() system call may wish to 2735 * specify a default set of parameters that would normally be supplied 2736 * through the inclusion of ancillary data. This socket option allows 2737 * such an application to set the default sctp_sndrcvinfo structure. 
2738 * The application that wishes to use this socket option simply passes 2739 * in to this call the sctp_sndrcvinfo structure defined in Section 2740 * 5.2.2) The input parameters accepted by this call include 2741 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2742 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2743 * to this call if the caller is using the UDP model. 2744 */ 2745 static int sctp_setsockopt_default_send_param(struct sock *sk, 2746 char __user *optval, 2747 unsigned int optlen) 2748 { 2749 struct sctp_sndrcvinfo info; 2750 struct sctp_association *asoc; 2751 struct sctp_sock *sp = sctp_sk(sk); 2752 2753 if (optlen != sizeof(struct sctp_sndrcvinfo)) 2754 return -EINVAL; 2755 if (copy_from_user(&info, optval, optlen)) 2756 return -EFAULT; 2757 2758 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2759 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2760 return -EINVAL; 2761 2762 if (asoc) { 2763 asoc->default_stream = info.sinfo_stream; 2764 asoc->default_flags = info.sinfo_flags; 2765 asoc->default_ppid = info.sinfo_ppid; 2766 asoc->default_context = info.sinfo_context; 2767 asoc->default_timetolive = info.sinfo_timetolive; 2768 } else { 2769 sp->default_stream = info.sinfo_stream; 2770 sp->default_flags = info.sinfo_flags; 2771 sp->default_ppid = info.sinfo_ppid; 2772 sp->default_context = info.sinfo_context; 2773 sp->default_timetolive = info.sinfo_timetolive; 2774 } 2775 2776 return 0; 2777 } 2778 2779 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2780 * 2781 * Requests that the local SCTP stack use the enclosed peer address as 2782 * the association primary. The enclosed address must be one of the 2783 * association peer's addresses. 2784 */ 2785 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2786 unsigned int optlen) 2787 { 2788 struct sctp_prim prim; 2789 struct sctp_transport *trans; 2790 2791 if (optlen != sizeof(struct sctp_prim)) 2792 return -EINVAL; 2793 2794 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2795 return -EFAULT; 2796 2797 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2798 if (!trans) 2799 return -EINVAL; 2800 2801 sctp_assoc_set_primary(trans->asoc, trans); 2802 2803 return 0; 2804 } 2805 2806 /* 2807 * 7.1.5 SCTP_NODELAY 2808 * 2809 * Turn on/off any Nagle-like algorithm. This means that packets are 2810 * generally sent as soon as possible and no unnecessary delays are 2811 * introduced, at the cost of more packets in the network. Expects an 2812 * integer boolean flag. 2813 */ 2814 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2815 unsigned int optlen) 2816 { 2817 int val; 2818 2819 if (optlen < sizeof(int)) 2820 return -EINVAL; 2821 if (get_user(val, (int __user *)optval)) 2822 return -EFAULT; 2823 2824 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2825 return 0; 2826 } 2827 2828 /* 2829 * 2830 * 7.1.1 SCTP_RTOINFO 2831 * 2832 * The protocol parameters used to initialize and bound retransmission 2833 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2834 * and modify these parameters. 2835 * All parameters are time values, in milliseconds. A value of 0, when 2836 * modifying the parameters, indicates that the current value should not 2837 * be changed. 
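 *
 * Illustrative sketch (editor's example; sd is assumed to be an SCTP
 * socket, and the endpoint defaults are targeted with srto_assoc_id = 0):
 *
 *	struct sctp_rtoinfo ri;
 *
 *	memset(&ri, 0, sizeof(ri));
 *	ri.srto_assoc_id = 0;
 *	ri.srto_initial = 500;		(milliseconds)
 *	ri.srto_min = 300;
 *	ri.srto_max = 0;		(0 = leave the maximum unchanged)
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, sizeof(ri));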
2838 * 2839 */ 2840 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2841 { 2842 struct sctp_rtoinfo rtoinfo; 2843 struct sctp_association *asoc; 2844 unsigned long rto_min, rto_max; 2845 struct sctp_sock *sp = sctp_sk(sk); 2846 2847 if (optlen != sizeof (struct sctp_rtoinfo)) 2848 return -EINVAL; 2849 2850 if (copy_from_user(&rtoinfo, optval, optlen)) 2851 return -EFAULT; 2852 2853 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 2854 2855 /* Set the values to the specific association */ 2856 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 2857 return -EINVAL; 2858 2859 rto_max = rtoinfo.srto_max; 2860 rto_min = rtoinfo.srto_min; 2861 2862 if (rto_max) 2863 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 2864 else 2865 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 2866 2867 if (rto_min) 2868 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 2869 else 2870 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 2871 2872 if (rto_min > rto_max) 2873 return -EINVAL; 2874 2875 if (asoc) { 2876 if (rtoinfo.srto_initial != 0) 2877 asoc->rto_initial = 2878 msecs_to_jiffies(rtoinfo.srto_initial); 2879 asoc->rto_max = rto_max; 2880 asoc->rto_min = rto_min; 2881 } else { 2882 /* If there is no association or the association-id = 0 2883 * set the values to the endpoint. 2884 */ 2885 if (rtoinfo.srto_initial != 0) 2886 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 2887 sp->rtoinfo.srto_max = rto_max; 2888 sp->rtoinfo.srto_min = rto_min; 2889 } 2890 2891 return 0; 2892 } 2893 2894 /* 2895 * 2896 * 7.1.2 SCTP_ASSOCINFO 2897 * 2898 * This option is used to tune the maximum retransmission attempts 2899 * of the association. 2900 * Returns an error if the new association retransmission value is 2901 * greater than the sum of the retransmission value of the peer. 2902 * See [SCTP] for more information. 2903 * 2904 */ 2905 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 2906 { 2907 2908 struct sctp_assocparams assocparams; 2909 struct sctp_association *asoc; 2910 2911 if (optlen != sizeof(struct sctp_assocparams)) 2912 return -EINVAL; 2913 if (copy_from_user(&assocparams, optval, optlen)) 2914 return -EFAULT; 2915 2916 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 2917 2918 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 2919 return -EINVAL; 2920 2921 /* Set the values to the specific association */ 2922 if (asoc) { 2923 if (assocparams.sasoc_asocmaxrxt != 0) { 2924 __u32 path_sum = 0; 2925 int paths = 0; 2926 struct sctp_transport *peer_addr; 2927 2928 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 2929 transports) { 2930 path_sum += peer_addr->pathmaxrxt; 2931 paths++; 2932 } 2933 2934 /* Only validate asocmaxrxt if we have more than 2935 * one path/transport. We do this because path 2936 * retransmissions are only counted when we have more 2937 * then one path. 
2938 */ 2939 if (paths > 1 && 2940 assocparams.sasoc_asocmaxrxt > path_sum) 2941 return -EINVAL; 2942 2943 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 2944 } 2945 2946 if (assocparams.sasoc_cookie_life != 0) 2947 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 2948 } else { 2949 /* Set the values to the endpoint */ 2950 struct sctp_sock *sp = sctp_sk(sk); 2951 2952 if (assocparams.sasoc_asocmaxrxt != 0) 2953 sp->assocparams.sasoc_asocmaxrxt = 2954 assocparams.sasoc_asocmaxrxt; 2955 if (assocparams.sasoc_cookie_life != 0) 2956 sp->assocparams.sasoc_cookie_life = 2957 assocparams.sasoc_cookie_life; 2958 } 2959 return 0; 2960 } 2961 2962 /* 2963 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 2964 * 2965 * This socket option is a boolean flag which turns on or off mapped V4 2966 * addresses. If this option is turned on and the socket is type 2967 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 2968 * If this option is turned off, then no mapping will be done of V4 2969 * addresses and a user will receive both PF_INET6 and PF_INET type 2970 * addresses on the socket. 2971 */ 2972 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 2973 { 2974 int val; 2975 struct sctp_sock *sp = sctp_sk(sk); 2976 2977 if (optlen < sizeof(int)) 2978 return -EINVAL; 2979 if (get_user(val, (int __user *)optval)) 2980 return -EFAULT; 2981 if (val) 2982 sp->v4mapped = 1; 2983 else 2984 sp->v4mapped = 0; 2985 2986 return 0; 2987 } 2988 2989 /* 2990 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 2991 * This option will get or set the maximum size to put in any outgoing 2992 * SCTP DATA chunk. If a message is larger than this size it will be 2993 * fragmented by SCTP into the specified size. Note that the underlying 2994 * SCTP implementation may fragment into smaller sized chunks when the 2995 * PMTU of the underlying association is smaller than the value set by 2996 * the user. The default value for this option is '0' which indicates 2997 * the user is NOT limiting fragmentation and only the PMTU will effect 2998 * SCTP's choice of DATA chunk size. Note also that values set larger 2999 * than the maximum size of an IP datagram will effectively let SCTP 3000 * control fragmentation (i.e. the same as setting this option to 0). 3001 * 3002 * The following structure is used to access and modify this parameter: 3003 * 3004 * struct sctp_assoc_value { 3005 * sctp_assoc_t assoc_id; 3006 * uint32_t assoc_value; 3007 * }; 3008 * 3009 * assoc_id: This parameter is ignored for one-to-one style sockets. 3010 * For one-to-many style sockets this parameter indicates which 3011 * association the user is performing an action upon. Note that if 3012 * this field's value is zero then the endpoints default value is 3013 * changed (effecting future associations only). 3014 * assoc_value: This parameter specifies the maximum size in bytes. 
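 *
 * Illustrative sketch (editor's example; sd is assumed to be an SCTP
 * socket): cap outgoing DATA chunks at 1200 bytes for future associations
 * on the endpoint:
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1200 };
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));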
3015 */ 3016 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3017 { 3018 struct sctp_assoc_value params; 3019 struct sctp_association *asoc; 3020 struct sctp_sock *sp = sctp_sk(sk); 3021 int val; 3022 3023 if (optlen == sizeof(int)) { 3024 pr_warn_ratelimited(DEPRECATED 3025 "%s (pid %d) " 3026 "Use of int in maxseg socket option.\n" 3027 "Use struct sctp_assoc_value instead\n", 3028 current->comm, task_pid_nr(current)); 3029 if (copy_from_user(&val, optval, optlen)) 3030 return -EFAULT; 3031 params.assoc_id = 0; 3032 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3033 if (copy_from_user(¶ms, optval, optlen)) 3034 return -EFAULT; 3035 val = params.assoc_value; 3036 } else 3037 return -EINVAL; 3038 3039 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) 3040 return -EINVAL; 3041 3042 asoc = sctp_id2assoc(sk, params.assoc_id); 3043 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 3044 return -EINVAL; 3045 3046 if (asoc) { 3047 if (val == 0) { 3048 val = asoc->pathmtu; 3049 val -= sp->pf->af->net_header_len; 3050 val -= sizeof(struct sctphdr) + 3051 sizeof(struct sctp_data_chunk); 3052 } 3053 asoc->user_frag = val; 3054 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3055 } else { 3056 sp->user_frag = val; 3057 } 3058 3059 return 0; 3060 } 3061 3062 3063 /* 3064 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3065 * 3066 * Requests that the peer mark the enclosed address as the association 3067 * primary. The enclosed address must be one of the association's 3068 * locally bound addresses. The following structure is used to make a 3069 * set primary request: 3070 */ 3071 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3072 unsigned int optlen) 3073 { 3074 struct net *net = sock_net(sk); 3075 struct sctp_sock *sp; 3076 struct sctp_association *asoc = NULL; 3077 struct sctp_setpeerprim prim; 3078 struct sctp_chunk *chunk; 3079 struct sctp_af *af; 3080 int err; 3081 3082 sp = sctp_sk(sk); 3083 3084 if (!net->sctp.addip_enable) 3085 return -EPERM; 3086 3087 if (optlen != sizeof(struct sctp_setpeerprim)) 3088 return -EINVAL; 3089 3090 if (copy_from_user(&prim, optval, optlen)) 3091 return -EFAULT; 3092 3093 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3094 if (!asoc) 3095 return -EINVAL; 3096 3097 if (!asoc->peer.asconf_capable) 3098 return -EPERM; 3099 3100 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3101 return -EPERM; 3102 3103 if (!sctp_state(asoc, ESTABLISHED)) 3104 return -ENOTCONN; 3105 3106 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3107 if (!af) 3108 return -EINVAL; 3109 3110 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3111 return -EADDRNOTAVAIL; 3112 3113 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3114 return -EADDRNOTAVAIL; 3115 3116 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3117 chunk = sctp_make_asconf_set_prim(asoc, 3118 (union sctp_addr *)&prim.sspp_addr); 3119 if (!chunk) 3120 return -ENOMEM; 3121 3122 err = sctp_send_asconf(asoc, chunk); 3123 3124 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3125 3126 return err; 3127 } 3128 3129 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3130 unsigned int optlen) 3131 { 3132 struct sctp_setadaptation adaptation; 3133 3134 if (optlen != sizeof(struct sctp_setadaptation)) 3135 return -EINVAL; 3136 if (copy_from_user(&adaptation, optval, optlen)) 3137 return -EFAULT; 3138 
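	/* Store the requested indication on the socket; it is advertised to
	 * peers in the Adaptation Layer Indication parameter of the INIT and
	 * INIT-ACK chunks of associations created after this point.
	 */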
3139 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3140 3141 return 0; 3142 } 3143 3144 /* 3145 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3146 * 3147 * The context field in the sctp_sndrcvinfo structure is normally only 3148 * used when a failed message is retrieved holding the value that was 3149 * sent down on the actual send call. This option allows the setting of 3150 * a default context on an association basis that will be received on 3151 * reading messages from the peer. This is especially helpful in the 3152 * one-2-many model for an application to keep some reference to an 3153 * internal state machine that is processing messages on the 3154 * association. Note that the setting of this value only effects 3155 * received messages from the peer and does not effect the value that is 3156 * saved with outbound messages. 3157 */ 3158 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3159 unsigned int optlen) 3160 { 3161 struct sctp_assoc_value params; 3162 struct sctp_sock *sp; 3163 struct sctp_association *asoc; 3164 3165 if (optlen != sizeof(struct sctp_assoc_value)) 3166 return -EINVAL; 3167 if (copy_from_user(¶ms, optval, optlen)) 3168 return -EFAULT; 3169 3170 sp = sctp_sk(sk); 3171 3172 if (params.assoc_id != 0) { 3173 asoc = sctp_id2assoc(sk, params.assoc_id); 3174 if (!asoc) 3175 return -EINVAL; 3176 asoc->default_rcv_context = params.assoc_value; 3177 } else { 3178 sp->default_rcv_context = params.assoc_value; 3179 } 3180 3181 return 0; 3182 } 3183 3184 /* 3185 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3186 * 3187 * This options will at a minimum specify if the implementation is doing 3188 * fragmented interleave. Fragmented interleave, for a one to many 3189 * socket, is when subsequent calls to receive a message may return 3190 * parts of messages from different associations. Some implementations 3191 * may allow you to turn this value on or off. If so, when turned off, 3192 * no fragment interleave will occur (which will cause a head of line 3193 * blocking amongst multiple associations sharing the same one to many 3194 * socket). When this option is turned on, then each receive call may 3195 * come from a different association (thus the user must receive data 3196 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3197 * association each receive belongs to. 3198 * 3199 * This option takes a boolean value. A non-zero value indicates that 3200 * fragmented interleave is on. A value of zero indicates that 3201 * fragmented interleave is off. 3202 * 3203 * Note that it is important that an implementation that allows this 3204 * option to be turned on, have it off by default. Otherwise an unaware 3205 * application using the one to many model may become confused and act 3206 * incorrectly. 3207 */ 3208 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3209 char __user *optval, 3210 unsigned int optlen) 3211 { 3212 int val; 3213 3214 if (optlen != sizeof(int)) 3215 return -EINVAL; 3216 if (get_user(val, (int __user *)optval)) 3217 return -EFAULT; 3218 3219 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; 3220 3221 return 0; 3222 } 3223 3224 /* 3225 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3226 * (SCTP_PARTIAL_DELIVERY_POINT) 3227 * 3228 * This option will set or get the SCTP partial delivery point. This 3229 * point is the size of a message where the partial delivery API will be 3230 * invoked to help free up rwnd space for the peer. 
Setting this to a 3231 * lower value will cause partial deliveries to happen more often. The 3232 * calls argument is an integer that sets or gets the partial delivery 3233 * point. Note also that the call will fail if the user attempts to set 3234 * this value larger than the socket receive buffer size. 3235 * 3236 * Note that any single message having a length smaller than or equal to 3237 * the SCTP partial delivery point will be delivered in one single read 3238 * call as long as the user provided buffer is large enough to hold the 3239 * message. 3240 */ 3241 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3242 char __user *optval, 3243 unsigned int optlen) 3244 { 3245 u32 val; 3246 3247 if (optlen != sizeof(u32)) 3248 return -EINVAL; 3249 if (get_user(val, (int __user *)optval)) 3250 return -EFAULT; 3251 3252 /* Note: We double the receive buffer from what the user sets 3253 * it to be, also initial rwnd is based on rcvbuf/2. 3254 */ 3255 if (val > (sk->sk_rcvbuf >> 1)) 3256 return -EINVAL; 3257 3258 sctp_sk(sk)->pd_point = val; 3259 3260 return 0; /* is this the right error code? */ 3261 } 3262 3263 /* 3264 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3265 * 3266 * This option will allow a user to change the maximum burst of packets 3267 * that can be emitted by this association. Note that the default value 3268 * is 4, and some implementations may restrict this setting so that it 3269 * can only be lowered. 3270 * 3271 * NOTE: This text doesn't seem right. Do this on a socket basis with 3272 * future associations inheriting the socket value. 3273 */ 3274 static int sctp_setsockopt_maxburst(struct sock *sk, 3275 char __user *optval, 3276 unsigned int optlen) 3277 { 3278 struct sctp_assoc_value params; 3279 struct sctp_sock *sp; 3280 struct sctp_association *asoc; 3281 int val; 3282 int assoc_id = 0; 3283 3284 if (optlen == sizeof(int)) { 3285 pr_warn_ratelimited(DEPRECATED 3286 "%s (pid %d) " 3287 "Use of int in max_burst socket option deprecated.\n" 3288 "Use struct sctp_assoc_value instead\n", 3289 current->comm, task_pid_nr(current)); 3290 if (copy_from_user(&val, optval, optlen)) 3291 return -EFAULT; 3292 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3293 if (copy_from_user(¶ms, optval, optlen)) 3294 return -EFAULT; 3295 val = params.assoc_value; 3296 assoc_id = params.assoc_id; 3297 } else 3298 return -EINVAL; 3299 3300 sp = sctp_sk(sk); 3301 3302 if (assoc_id != 0) { 3303 asoc = sctp_id2assoc(sk, assoc_id); 3304 if (!asoc) 3305 return -EINVAL; 3306 asoc->max_burst = val; 3307 } else 3308 sp->max_burst = val; 3309 3310 return 0; 3311 } 3312 3313 /* 3314 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3315 * 3316 * This set option adds a chunk type that the user is requesting to be 3317 * received only in an authenticated way. Changes to the list of chunks 3318 * will only effect future associations on the socket. 
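 *
 * Illustrative sketch (editor's example; sd is assumed to be an SCTP
 * socket with SCTP-AUTH enabled): require DATA chunks (chunk type 0) to be
 * authenticated on future associations:
 *
 *	struct sctp_authchunk ac = { .sauth_chunk = 0 };
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));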
3319 */ 3320 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3321 char __user *optval, 3322 unsigned int optlen) 3323 { 3324 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3325 struct sctp_authchunk val; 3326 3327 if (!ep->auth_enable) 3328 return -EACCES; 3329 3330 if (optlen != sizeof(struct sctp_authchunk)) 3331 return -EINVAL; 3332 if (copy_from_user(&val, optval, optlen)) 3333 return -EFAULT; 3334 3335 switch (val.sauth_chunk) { 3336 case SCTP_CID_INIT: 3337 case SCTP_CID_INIT_ACK: 3338 case SCTP_CID_SHUTDOWN_COMPLETE: 3339 case SCTP_CID_AUTH: 3340 return -EINVAL; 3341 } 3342 3343 /* add this chunk id to the endpoint */ 3344 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3345 } 3346 3347 /* 3348 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3349 * 3350 * This option gets or sets the list of HMAC algorithms that the local 3351 * endpoint requires the peer to use. 3352 */ 3353 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3354 char __user *optval, 3355 unsigned int optlen) 3356 { 3357 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3358 struct sctp_hmacalgo *hmacs; 3359 u32 idents; 3360 int err; 3361 3362 if (!ep->auth_enable) 3363 return -EACCES; 3364 3365 if (optlen < sizeof(struct sctp_hmacalgo)) 3366 return -EINVAL; 3367 3368 hmacs = memdup_user(optval, optlen); 3369 if (IS_ERR(hmacs)) 3370 return PTR_ERR(hmacs); 3371 3372 idents = hmacs->shmac_num_idents; 3373 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3374 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3375 err = -EINVAL; 3376 goto out; 3377 } 3378 3379 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3380 out: 3381 kfree(hmacs); 3382 return err; 3383 } 3384 3385 /* 3386 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3387 * 3388 * This option will set a shared secret key which is used to build an 3389 * association shared key. 3390 */ 3391 static int sctp_setsockopt_auth_key(struct sock *sk, 3392 char __user *optval, 3393 unsigned int optlen) 3394 { 3395 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3396 struct sctp_authkey *authkey; 3397 struct sctp_association *asoc; 3398 int ret; 3399 3400 if (!ep->auth_enable) 3401 return -EACCES; 3402 3403 if (optlen <= sizeof(struct sctp_authkey)) 3404 return -EINVAL; 3405 3406 authkey = memdup_user(optval, optlen); 3407 if (IS_ERR(authkey)) 3408 return PTR_ERR(authkey); 3409 3410 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3411 ret = -EINVAL; 3412 goto out; 3413 } 3414 3415 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3416 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3417 ret = -EINVAL; 3418 goto out; 3419 } 3420 3421 ret = sctp_auth_set_key(ep, asoc, authkey); 3422 out: 3423 kzfree(authkey); 3424 return ret; 3425 } 3426 3427 /* 3428 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3429 * 3430 * This option will get or set the active shared key to be used to build 3431 * the association shared key. 
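 *
 * Illustrative sketch (editor's example; key number 1 is assumed to have
 * been installed earlier with SCTP_AUTH_KEY, and scact_assoc_id = 0
 * targets the endpoint on a one-to-many socket):
 *
 *	struct sctp_authkeyid ak = { .scact_assoc_id = 0,
 *				     .scact_keynumber = 1 };
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY, &ak, sizeof(ak));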
3432 */ 3433 static int sctp_setsockopt_active_key(struct sock *sk, 3434 char __user *optval, 3435 unsigned int optlen) 3436 { 3437 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3438 struct sctp_authkeyid val; 3439 struct sctp_association *asoc; 3440 3441 if (!ep->auth_enable) 3442 return -EACCES; 3443 3444 if (optlen != sizeof(struct sctp_authkeyid)) 3445 return -EINVAL; 3446 if (copy_from_user(&val, optval, optlen)) 3447 return -EFAULT; 3448 3449 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3450 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3451 return -EINVAL; 3452 3453 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3454 } 3455 3456 /* 3457 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3458 * 3459 * This set option will delete a shared secret key from use. 3460 */ 3461 static int sctp_setsockopt_del_key(struct sock *sk, 3462 char __user *optval, 3463 unsigned int optlen) 3464 { 3465 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3466 struct sctp_authkeyid val; 3467 struct sctp_association *asoc; 3468 3469 if (!ep->auth_enable) 3470 return -EACCES; 3471 3472 if (optlen != sizeof(struct sctp_authkeyid)) 3473 return -EINVAL; 3474 if (copy_from_user(&val, optval, optlen)) 3475 return -EFAULT; 3476 3477 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3478 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3479 return -EINVAL; 3480 3481 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3482 3483 } 3484 3485 /* 3486 * 8.1.23 SCTP_AUTO_ASCONF 3487 * 3488 * This option will enable or disable the use of the automatic generation of 3489 * ASCONF chunks to add and delete addresses to an existing association. Note 3490 * that this option has two caveats namely: a) it only affects sockets that 3491 * are bound to all addresses available to the SCTP stack, and b) the system 3492 * administrator may have an overriding control that turns the ASCONF feature 3493 * off no matter what setting the socket option may have. 3494 * This option expects an integer boolean flag, where a non-zero value turns on 3495 * the option, and a zero value turns off the option. 3496 * Note. In this implementation, socket operation overrides default parameter 3497 * being set by sysctl as well as FreeBSD implementation 3498 */ 3499 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3500 unsigned int optlen) 3501 { 3502 int val; 3503 struct sctp_sock *sp = sctp_sk(sk); 3504 3505 if (optlen < sizeof(int)) 3506 return -EINVAL; 3507 if (get_user(val, (int __user *)optval)) 3508 return -EFAULT; 3509 if (!sctp_is_ep_boundall(sk) && val) 3510 return -EINVAL; 3511 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3512 return 0; 3513 3514 if (val == 0 && sp->do_auto_asconf) { 3515 list_del(&sp->auto_asconf_list); 3516 sp->do_auto_asconf = 0; 3517 } else if (val && !sp->do_auto_asconf) { 3518 list_add_tail(&sp->auto_asconf_list, 3519 &sock_net(sk)->sctp.auto_asconf_splist); 3520 sp->do_auto_asconf = 1; 3521 } 3522 return 0; 3523 } 3524 3525 3526 /* 3527 * SCTP_PEER_ADDR_THLDS 3528 * 3529 * This option allows us to alter the partially failed threshold for one or all 3530 * transports in an association. 
See Section 6.1 of: 3531 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3532 */ 3533 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3534 char __user *optval, 3535 unsigned int optlen) 3536 { 3537 struct sctp_paddrthlds val; 3538 struct sctp_transport *trans; 3539 struct sctp_association *asoc; 3540 3541 if (optlen < sizeof(struct sctp_paddrthlds)) 3542 return -EINVAL; 3543 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3544 sizeof(struct sctp_paddrthlds))) 3545 return -EFAULT; 3546 3547 3548 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3549 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3550 if (!asoc) 3551 return -ENOENT; 3552 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3553 transports) { 3554 if (val.spt_pathmaxrxt) 3555 trans->pathmaxrxt = val.spt_pathmaxrxt; 3556 trans->pf_retrans = val.spt_pathpfthld; 3557 } 3558 3559 if (val.spt_pathmaxrxt) 3560 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3561 asoc->pf_retrans = val.spt_pathpfthld; 3562 } else { 3563 trans = sctp_addr_id2transport(sk, &val.spt_address, 3564 val.spt_assoc_id); 3565 if (!trans) 3566 return -ENOENT; 3567 3568 if (val.spt_pathmaxrxt) 3569 trans->pathmaxrxt = val.spt_pathmaxrxt; 3570 trans->pf_retrans = val.spt_pathpfthld; 3571 } 3572 3573 return 0; 3574 } 3575 3576 /* API 6.2 setsockopt(), getsockopt() 3577 * 3578 * Applications use setsockopt() and getsockopt() to set or retrieve 3579 * socket options. Socket options are used to change the default 3580 * behavior of sockets calls. They are described in Section 7. 3581 * 3582 * The syntax is: 3583 * 3584 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 3585 * int __user *optlen); 3586 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 3587 * int optlen); 3588 * 3589 * sd - the socket descript. 3590 * level - set to IPPROTO_SCTP for all SCTP options. 3591 * optname - the option name. 3592 * optval - the buffer to store the value of the option. 3593 * optlen - the size of the buffer. 3594 */ 3595 static int sctp_setsockopt(struct sock *sk, int level, int optname, 3596 char __user *optval, unsigned int optlen) 3597 { 3598 int retval = 0; 3599 3600 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 3601 3602 /* I can hardly begin to describe how wrong this is. This is 3603 * so broken as to be worse than useless. The API draft 3604 * REALLY is NOT helpful here... I am not convinced that the 3605 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 3606 * are at all well-founded. 3607 */ 3608 if (level != SOL_SCTP) { 3609 struct sctp_af *af = sctp_sk(sk)->pf->af; 3610 retval = af->setsockopt(sk, level, optname, optval, optlen); 3611 goto out_nounlock; 3612 } 3613 3614 lock_sock(sk); 3615 3616 switch (optname) { 3617 case SCTP_SOCKOPT_BINDX_ADD: 3618 /* 'optlen' is the size of the addresses buffer. */ 3619 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3620 optlen, SCTP_BINDX_ADD_ADDR); 3621 break; 3622 3623 case SCTP_SOCKOPT_BINDX_REM: 3624 /* 'optlen' is the size of the addresses buffer. */ 3625 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 3626 optlen, SCTP_BINDX_REM_ADDR); 3627 break; 3628 3629 case SCTP_SOCKOPT_CONNECTX_OLD: 3630 /* 'optlen' is the size of the addresses buffer. */ 3631 retval = sctp_setsockopt_connectx_old(sk, 3632 (struct sockaddr __user *)optval, 3633 optlen); 3634 break; 3635 3636 case SCTP_SOCKOPT_CONNECTX: 3637 /* 'optlen' is the size of the addresses buffer. 
*/ 3638 retval = sctp_setsockopt_connectx(sk, 3639 (struct sockaddr __user *)optval, 3640 optlen); 3641 break; 3642 3643 case SCTP_DISABLE_FRAGMENTS: 3644 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 3645 break; 3646 3647 case SCTP_EVENTS: 3648 retval = sctp_setsockopt_events(sk, optval, optlen); 3649 break; 3650 3651 case SCTP_AUTOCLOSE: 3652 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 3653 break; 3654 3655 case SCTP_PEER_ADDR_PARAMS: 3656 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3657 break; 3658 3659 case SCTP_DELAYED_SACK: 3660 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 3661 break; 3662 case SCTP_PARTIAL_DELIVERY_POINT: 3663 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3664 break; 3665 3666 case SCTP_INITMSG: 3667 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 3668 break; 3669 case SCTP_DEFAULT_SEND_PARAM: 3670 retval = sctp_setsockopt_default_send_param(sk, optval, 3671 optlen); 3672 break; 3673 case SCTP_PRIMARY_ADDR: 3674 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 3675 break; 3676 case SCTP_SET_PEER_PRIMARY_ADDR: 3677 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 3678 break; 3679 case SCTP_NODELAY: 3680 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 3681 break; 3682 case SCTP_RTOINFO: 3683 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 3684 break; 3685 case SCTP_ASSOCINFO: 3686 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 3687 break; 3688 case SCTP_I_WANT_MAPPED_V4_ADDR: 3689 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 3690 break; 3691 case SCTP_MAXSEG: 3692 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 3693 break; 3694 case SCTP_ADAPTATION_LAYER: 3695 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 3696 break; 3697 case SCTP_CONTEXT: 3698 retval = sctp_setsockopt_context(sk, optval, optlen); 3699 break; 3700 case SCTP_FRAGMENT_INTERLEAVE: 3701 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 3702 break; 3703 case SCTP_MAX_BURST: 3704 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 3705 break; 3706 case SCTP_AUTH_CHUNK: 3707 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 3708 break; 3709 case SCTP_HMAC_IDENT: 3710 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 3711 break; 3712 case SCTP_AUTH_KEY: 3713 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 3714 break; 3715 case SCTP_AUTH_ACTIVE_KEY: 3716 retval = sctp_setsockopt_active_key(sk, optval, optlen); 3717 break; 3718 case SCTP_AUTH_DELETE_KEY: 3719 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3720 break; 3721 case SCTP_AUTO_ASCONF: 3722 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 3723 break; 3724 case SCTP_PEER_ADDR_THLDS: 3725 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 3726 break; 3727 default: 3728 retval = -ENOPROTOOPT; 3729 break; 3730 } 3731 3732 release_sock(sk); 3733 3734 out_nounlock: 3735 return retval; 3736 } 3737 3738 /* API 3.1.6 connect() - UDP Style Syntax 3739 * 3740 * An application may use the connect() call in the UDP model to initiate an 3741 * association without sending data. 3742 * 3743 * The syntax is: 3744 * 3745 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 3746 * 3747 * sd: the socket descriptor to have a new association added to. 3748 * 3749 * nam: the address structure (either struct sockaddr_in or struct 3750 * sockaddr_in6 defined in RFC2553 [7]). 3751 * 3752 * len: the size of the address. 
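 *
 * A minimal user-level sketch of the call described above (illustrative
 * only; sd is a one-to-many SCTP socket created elsewhere, the address
 * and port are placeholders, includes and error handling trimmed):
 *
 *   struct sockaddr_in peer;
 *
 *   memset(&peer, 0, sizeof(peer));
 *   peer.sin_family = AF_INET;
 *   peer.sin_port = htons(5000);
 *   inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *   if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *       perror("connect");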
3753 */ 3754 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 3755 int addr_len) 3756 { 3757 int err = 0; 3758 struct sctp_af *af; 3759 3760 lock_sock(sk); 3761 3762 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 3763 addr, addr_len); 3764 3765 /* Validate addr_len before calling common connect/connectx routine. */ 3766 af = sctp_get_af_specific(addr->sa_family); 3767 if (!af || addr_len < af->sockaddr_len) { 3768 err = -EINVAL; 3769 } else { 3770 /* Pass correct addr len to common routine (so it knows there 3771 * is only one address being passed. 3772 */ 3773 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 3774 } 3775 3776 release_sock(sk); 3777 return err; 3778 } 3779 3780 /* FIXME: Write comments. */ 3781 static int sctp_disconnect(struct sock *sk, int flags) 3782 { 3783 return -EOPNOTSUPP; /* STUB */ 3784 } 3785 3786 /* 4.1.4 accept() - TCP Style Syntax 3787 * 3788 * Applications use accept() call to remove an established SCTP 3789 * association from the accept queue of the endpoint. A new socket 3790 * descriptor will be returned from accept() to represent the newly 3791 * formed association. 3792 */ 3793 static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 3794 { 3795 struct sctp_sock *sp; 3796 struct sctp_endpoint *ep; 3797 struct sock *newsk = NULL; 3798 struct sctp_association *asoc; 3799 long timeo; 3800 int error = 0; 3801 3802 lock_sock(sk); 3803 3804 sp = sctp_sk(sk); 3805 ep = sp->ep; 3806 3807 if (!sctp_style(sk, TCP)) { 3808 error = -EOPNOTSUPP; 3809 goto out; 3810 } 3811 3812 if (!sctp_sstate(sk, LISTENING)) { 3813 error = -EINVAL; 3814 goto out; 3815 } 3816 3817 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 3818 3819 error = sctp_wait_for_accept(sk, timeo); 3820 if (error) 3821 goto out; 3822 3823 /* We treat the list of associations on the endpoint as the accept 3824 * queue and pick the first association on the list. 3825 */ 3826 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 3827 3828 newsk = sp->pf->create_accept_sk(sk, asoc); 3829 if (!newsk) { 3830 error = -ENOMEM; 3831 goto out; 3832 } 3833 3834 /* Populate the fields of the newsk from the oldsk and migrate the 3835 * asoc to the newsk. 3836 */ 3837 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 3838 3839 out: 3840 release_sock(sk); 3841 *err = error; 3842 return newsk; 3843 } 3844 3845 /* The SCTP ioctl handler. */ 3846 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 3847 { 3848 int rc = -ENOTCONN; 3849 3850 lock_sock(sk); 3851 3852 /* 3853 * SEQPACKET-style sockets in LISTENING state are valid, for 3854 * SCTP, so only discard TCP-style sockets in LISTENING state. 3855 */ 3856 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 3857 goto out; 3858 3859 switch (cmd) { 3860 case SIOCINQ: { 3861 struct sk_buff *skb; 3862 unsigned int amount = 0; 3863 3864 skb = skb_peek(&sk->sk_receive_queue); 3865 if (skb != NULL) { 3866 /* 3867 * We will only return the amount of this packet since 3868 * that is all that will be read. 3869 */ 3870 amount = skb->len; 3871 } 3872 rc = put_user(amount, (int __user *)arg); 3873 break; 3874 } 3875 default: 3876 rc = -ENOIOCTLCMD; 3877 break; 3878 } 3879 out: 3880 release_sock(sk); 3881 return rc; 3882 } 3883 3884 /* This is the function which gets called during socket creation to 3885 * initialized the SCTP-specific portion of the sock. 3886 * The sock structure should already be zero-filled memory. 
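 *
 * For reference, the user-level calls that reach this function
 * (illustrative only; IPv4 shown, IPv6 is analogous):
 *
 *   int one_to_many = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *   int one_to_one  = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *
 * SOCK_SEQPACKET initializes a UDP-style (one-to-many) socket,
 * SOCK_STREAM a TCP-style (one-to-one) socket, and any other type is
 * rejected with -ESOCKTNOSUPPORT.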
3887 */ 3888 static int sctp_init_sock(struct sock *sk) 3889 { 3890 struct net *net = sock_net(sk); 3891 struct sctp_sock *sp; 3892 3893 pr_debug("%s: sk:%p\n", __func__, sk); 3894 3895 sp = sctp_sk(sk); 3896 3897 /* Initialize the SCTP per socket area. */ 3898 switch (sk->sk_type) { 3899 case SOCK_SEQPACKET: 3900 sp->type = SCTP_SOCKET_UDP; 3901 break; 3902 case SOCK_STREAM: 3903 sp->type = SCTP_SOCKET_TCP; 3904 break; 3905 default: 3906 return -ESOCKTNOSUPPORT; 3907 } 3908 3909 /* Initialize default send parameters. These parameters can be 3910 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 3911 */ 3912 sp->default_stream = 0; 3913 sp->default_ppid = 0; 3914 sp->default_flags = 0; 3915 sp->default_context = 0; 3916 sp->default_timetolive = 0; 3917 3918 sp->default_rcv_context = 0; 3919 sp->max_burst = net->sctp.max_burst; 3920 3921 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 3922 3923 /* Initialize default setup parameters. These parameters 3924 * can be modified with the SCTP_INITMSG socket option or 3925 * overridden by the SCTP_INIT CMSG. 3926 */ 3927 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3928 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3929 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 3930 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 3931 3932 /* Initialize default RTO related parameters. These parameters can 3933 * be modified for with the SCTP_RTOINFO socket option. 3934 */ 3935 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 3936 sp->rtoinfo.srto_max = net->sctp.rto_max; 3937 sp->rtoinfo.srto_min = net->sctp.rto_min; 3938 3939 /* Initialize default association related parameters. These parameters 3940 * can be modified with the SCTP_ASSOCINFO socket option. 3941 */ 3942 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 3943 sp->assocparams.sasoc_number_peer_destinations = 0; 3944 sp->assocparams.sasoc_peer_rwnd = 0; 3945 sp->assocparams.sasoc_local_rwnd = 0; 3946 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 3947 3948 /* Initialize default event subscriptions. By default, all the 3949 * options are off. 3950 */ 3951 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 3952 3953 /* Default Peer Address Parameters. These defaults can 3954 * be modified via SCTP_PEER_ADDR_PARAMS 3955 */ 3956 sp->hbinterval = net->sctp.hb_interval; 3957 sp->pathmaxrxt = net->sctp.max_retrans_path; 3958 sp->pathmtu = 0; /* allow default discovery */ 3959 sp->sackdelay = net->sctp.sack_timeout; 3960 sp->sackfreq = 2; 3961 sp->param_flags = SPP_HB_ENABLE | 3962 SPP_PMTUD_ENABLE | 3963 SPP_SACKDELAY_ENABLE; 3964 3965 /* If enabled no SCTP message fragmentation will be performed. 3966 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 3967 */ 3968 sp->disable_fragments = 0; 3969 3970 /* Enable Nagle algorithm by default. */ 3971 sp->nodelay = 0; 3972 3973 /* Enable by default. */ 3974 sp->v4mapped = 1; 3975 3976 /* Auto-close idle associations after the configured 3977 * number of seconds. A value of 0 disables this 3978 * feature. Configure through the SCTP_AUTOCLOSE socket option, 3979 * for UDP-style sockets only. 3980 */ 3981 sp->autoclose = 0; 3982 3983 /* User specified fragmentation limit. */ 3984 sp->user_frag = 0; 3985 3986 sp->adaptation_ind = 0; 3987 3988 sp->pf = sctp_get_pf_specific(sk->sk_family); 3989 3990 /* Control variables for partial data delivery. 
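 * Roughly: pd_mode counts receive queues on this socket that are
 * currently in partial delivery, pd_lobby holds incoming messages that
 * must wait while a partial delivery is in progress, and
 * frag_interleave (configured via SCTP_FRAGMENT_INTERLEAVE) controls
 * whether such messages may instead be interleaved with the partially
 * delivered one.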
*/ 3991 atomic_set(&sp->pd_mode, 0); 3992 skb_queue_head_init(&sp->pd_lobby); 3993 sp->frag_interleave = 0; 3994 3995 /* Create a per socket endpoint structure. Even if we 3996 * change the data structure relationships, this may still 3997 * be useful for storing pre-connect address information. 3998 */ 3999 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4000 if (!sp->ep) 4001 return -ENOMEM; 4002 4003 sp->hmac = NULL; 4004 4005 sk->sk_destruct = sctp_destruct_sock; 4006 4007 SCTP_DBG_OBJCNT_INC(sock); 4008 4009 local_bh_disable(); 4010 percpu_counter_inc(&sctp_sockets_allocated); 4011 sock_prot_inuse_add(net, sk->sk_prot, 1); 4012 if (net->sctp.default_auto_asconf) { 4013 list_add_tail(&sp->auto_asconf_list, 4014 &net->sctp.auto_asconf_splist); 4015 sp->do_auto_asconf = 1; 4016 } else 4017 sp->do_auto_asconf = 0; 4018 local_bh_enable(); 4019 4020 return 0; 4021 } 4022 4023 /* Cleanup any SCTP per socket resources. */ 4024 static void sctp_destroy_sock(struct sock *sk) 4025 { 4026 struct sctp_sock *sp; 4027 4028 pr_debug("%s: sk:%p\n", __func__, sk); 4029 4030 /* Release our hold on the endpoint. */ 4031 sp = sctp_sk(sk); 4032 /* This could happen during socket init, thus we bail out 4033 * early, since the rest of the below is not setup either. 4034 */ 4035 if (sp->ep == NULL) 4036 return; 4037 4038 if (sp->do_auto_asconf) { 4039 sp->do_auto_asconf = 0; 4040 list_del(&sp->auto_asconf_list); 4041 } 4042 sctp_endpoint_free(sp->ep); 4043 local_bh_disable(); 4044 percpu_counter_dec(&sctp_sockets_allocated); 4045 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4046 local_bh_enable(); 4047 } 4048 4049 /* Triggered when there are no references on the socket anymore */ 4050 static void sctp_destruct_sock(struct sock *sk) 4051 { 4052 struct sctp_sock *sp = sctp_sk(sk); 4053 4054 /* Free up the HMAC transform. */ 4055 crypto_free_hash(sp->hmac); 4056 4057 inet_sock_destruct(sk); 4058 } 4059 4060 /* API 4.1.7 shutdown() - TCP Style Syntax 4061 * int shutdown(int socket, int how); 4062 * 4063 * sd - the socket descriptor of the association to be closed. 4064 * how - Specifies the type of shutdown. The values are 4065 * as follows: 4066 * SHUT_RD 4067 * Disables further receive operations. No SCTP 4068 * protocol action is taken. 4069 * SHUT_WR 4070 * Disables further send operations, and initiates 4071 * the SCTP shutdown sequence. 4072 * SHUT_RDWR 4073 * Disables further send and receive operations 4074 * and initiates the SCTP shutdown sequence. 4075 */ 4076 static void sctp_shutdown(struct sock *sk, int how) 4077 { 4078 struct net *net = sock_net(sk); 4079 struct sctp_endpoint *ep; 4080 struct sctp_association *asoc; 4081 4082 if (!sctp_style(sk, TCP)) 4083 return; 4084 4085 if (how & SEND_SHUTDOWN) { 4086 ep = sctp_sk(sk)->ep; 4087 if (!list_empty(&ep->asocs)) { 4088 asoc = list_entry(ep->asocs.next, 4089 struct sctp_association, asocs); 4090 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4091 } 4092 } 4093 } 4094 4095 /* 7.2.1 Association Status (SCTP_STATUS) 4096 4097 * Applications can retrieve current status information about an 4098 * association, including association state, peer receiver window size, 4099 * number of unacked data chunks, and number of data chunks pending 4100 * receipt. This information is read-only. 
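 *
 * Example (user level, illustrative only; sd and assoc_id are
 * placeholders, error handling trimmed). sstat_assoc_id selects the
 * association to query on a one-to-many socket:
 *
 *   struct sctp_status status;
 *   socklen_t optlen = sizeof(status);
 *
 *   memset(&status, 0, sizeof(status));
 *   status.sstat_assoc_id = assoc_id;
 *   if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &optlen) == 0)
 *       printf("state %d rwnd %u unacked %u\n", status.sstat_state,
 *              status.sstat_rwnd, status.sstat_unackdata);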
4101 */ 4102 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4103 char __user *optval, 4104 int __user *optlen) 4105 { 4106 struct sctp_status status; 4107 struct sctp_association *asoc = NULL; 4108 struct sctp_transport *transport; 4109 sctp_assoc_t associd; 4110 int retval = 0; 4111 4112 if (len < sizeof(status)) { 4113 retval = -EINVAL; 4114 goto out; 4115 } 4116 4117 len = sizeof(status); 4118 if (copy_from_user(&status, optval, len)) { 4119 retval = -EFAULT; 4120 goto out; 4121 } 4122 4123 associd = status.sstat_assoc_id; 4124 asoc = sctp_id2assoc(sk, associd); 4125 if (!asoc) { 4126 retval = -EINVAL; 4127 goto out; 4128 } 4129 4130 transport = asoc->peer.primary_path; 4131 4132 status.sstat_assoc_id = sctp_assoc2id(asoc); 4133 status.sstat_state = asoc->state; 4134 status.sstat_rwnd = asoc->peer.rwnd; 4135 status.sstat_unackdata = asoc->unack_data; 4136 4137 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4138 status.sstat_instrms = asoc->c.sinit_max_instreams; 4139 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4140 status.sstat_fragmentation_point = asoc->frag_point; 4141 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4142 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4143 transport->af_specific->sockaddr_len); 4144 /* Map ipv4 address into v4-mapped-on-v6 address. */ 4145 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4146 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4147 status.sstat_primary.spinfo_state = transport->state; 4148 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4149 status.sstat_primary.spinfo_srtt = transport->srtt; 4150 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4151 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4152 4153 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4154 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4155 4156 if (put_user(len, optlen)) { 4157 retval = -EFAULT; 4158 goto out; 4159 } 4160 4161 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4162 __func__, len, status.sstat_state, status.sstat_rwnd, 4163 status.sstat_assoc_id); 4164 4165 if (copy_to_user(optval, &status, len)) { 4166 retval = -EFAULT; 4167 goto out; 4168 } 4169 4170 out: 4171 return retval; 4172 } 4173 4174 4175 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4176 * 4177 * Applications can retrieve information about a specific peer address 4178 * of an association, including its reachability state, congestion 4179 * window, and retransmission timer values. This information is 4180 * read-only. 
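 *
 * Example (user level, illustrative only; sd, assoc_id and peer_addr
 * are placeholders, error handling trimmed). spinfo_address selects
 * which peer address to query:
 *
 *   struct sctp_paddrinfo pinfo;
 *   socklen_t optlen = sizeof(pinfo);
 *
 *   memset(&pinfo, 0, sizeof(pinfo));
 *   pinfo.spinfo_assoc_id = assoc_id;
 *   memcpy(&pinfo.spinfo_address, &peer_addr, sizeof(peer_addr));
 *   if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *                  &pinfo, &optlen) == 0)
 *       printf("cwnd %u srtt %u rto %u\n", pinfo.spinfo_cwnd,
 *              pinfo.spinfo_srtt, pinfo.spinfo_rto);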
4181 */ 4182 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4183 char __user *optval, 4184 int __user *optlen) 4185 { 4186 struct sctp_paddrinfo pinfo; 4187 struct sctp_transport *transport; 4188 int retval = 0; 4189 4190 if (len < sizeof(pinfo)) { 4191 retval = -EINVAL; 4192 goto out; 4193 } 4194 4195 len = sizeof(pinfo); 4196 if (copy_from_user(&pinfo, optval, len)) { 4197 retval = -EFAULT; 4198 goto out; 4199 } 4200 4201 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4202 pinfo.spinfo_assoc_id); 4203 if (!transport) 4204 return -EINVAL; 4205 4206 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4207 pinfo.spinfo_state = transport->state; 4208 pinfo.spinfo_cwnd = transport->cwnd; 4209 pinfo.spinfo_srtt = transport->srtt; 4210 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4211 pinfo.spinfo_mtu = transport->pathmtu; 4212 4213 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4214 pinfo.spinfo_state = SCTP_ACTIVE; 4215 4216 if (put_user(len, optlen)) { 4217 retval = -EFAULT; 4218 goto out; 4219 } 4220 4221 if (copy_to_user(optval, &pinfo, len)) { 4222 retval = -EFAULT; 4223 goto out; 4224 } 4225 4226 out: 4227 return retval; 4228 } 4229 4230 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 4231 * 4232 * This option is a on/off flag. If enabled no SCTP message 4233 * fragmentation will be performed. Instead if a message being sent 4234 * exceeds the current PMTU size, the message will NOT be sent and 4235 * instead a error will be indicated to the user. 4236 */ 4237 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 4238 char __user *optval, int __user *optlen) 4239 { 4240 int val; 4241 4242 if (len < sizeof(int)) 4243 return -EINVAL; 4244 4245 len = sizeof(int); 4246 val = (sctp_sk(sk)->disable_fragments == 1); 4247 if (put_user(len, optlen)) 4248 return -EFAULT; 4249 if (copy_to_user(optval, &val, len)) 4250 return -EFAULT; 4251 return 0; 4252 } 4253 4254 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 4255 * 4256 * This socket option is used to specify various notifications and 4257 * ancillary data the user wishes to receive. 4258 */ 4259 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4260 int __user *optlen) 4261 { 4262 if (len <= 0) 4263 return -EINVAL; 4264 if (len > sizeof(struct sctp_event_subscribe)) 4265 len = sizeof(struct sctp_event_subscribe); 4266 if (put_user(len, optlen)) 4267 return -EFAULT; 4268 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4269 return -EFAULT; 4270 return 0; 4271 } 4272 4273 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 4274 * 4275 * This socket option is applicable to the UDP-style socket only. When 4276 * set it will cause associations that are idle for more than the 4277 * specified number of seconds to automatically close. An association 4278 * being idle is defined an association that has NOT sent or received 4279 * user data. The special value of '0' indicates that no automatic 4280 * close of any associations should be performed. The option expects an 4281 * integer defining the number of seconds of idle time before an 4282 * association is closed. 
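 *
 * Example (user level, illustrative only; applies to one-to-many
 * sockets, sd is a placeholder, error handling trimmed):
 *
 *   int seconds = 120;
 *   socklen_t optlen = sizeof(seconds);
 *
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &seconds, sizeof(seconds));
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &seconds, &optlen);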
4283 */ 4284 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 4285 { 4286 /* Applicable to UDP-style socket only */ 4287 if (sctp_style(sk, TCP)) 4288 return -EOPNOTSUPP; 4289 if (len < sizeof(int)) 4290 return -EINVAL; 4291 len = sizeof(int); 4292 if (put_user(len, optlen)) 4293 return -EFAULT; 4294 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 4295 return -EFAULT; 4296 return 0; 4297 } 4298 4299 /* Helper routine to branch off an association to a new socket. */ 4300 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 4301 { 4302 struct sctp_association *asoc = sctp_id2assoc(sk, id); 4303 struct socket *sock; 4304 struct sctp_af *af; 4305 int err = 0; 4306 4307 if (!asoc) 4308 return -EINVAL; 4309 4310 /* An association cannot be branched off from an already peeled-off 4311 * socket, nor is this supported for tcp style sockets. 4312 */ 4313 if (!sctp_style(sk, UDP)) 4314 return -EINVAL; 4315 4316 /* Create a new socket. */ 4317 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 4318 if (err < 0) 4319 return err; 4320 4321 sctp_copy_sock(sock->sk, sk, asoc); 4322 4323 /* Make peeled-off sockets more like 1-1 accepted sockets. 4324 * Set the daddr and initialize id to something more random 4325 */ 4326 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); 4327 af->to_sk_daddr(&asoc->peer.primary_addr, sk); 4328 4329 /* Populate the fields of the newsk from the oldsk and migrate the 4330 * asoc to the newsk. 4331 */ 4332 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 4333 4334 *sockp = sock; 4335 4336 return err; 4337 } 4338 EXPORT_SYMBOL(sctp_do_peeloff); 4339 4340 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4341 { 4342 sctp_peeloff_arg_t peeloff; 4343 struct socket *newsock; 4344 struct file *newfile; 4345 int retval = 0; 4346 4347 if (len < sizeof(sctp_peeloff_arg_t)) 4348 return -EINVAL; 4349 len = sizeof(sctp_peeloff_arg_t); 4350 if (copy_from_user(&peeloff, optval, len)) 4351 return -EFAULT; 4352 4353 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); 4354 if (retval < 0) 4355 goto out; 4356 4357 /* Map the socket to an unused fd that can be returned to the user. */ 4358 retval = get_unused_fd_flags(0); 4359 if (retval < 0) { 4360 sock_release(newsock); 4361 goto out; 4362 } 4363 4364 newfile = sock_alloc_file(newsock, 0, NULL); 4365 if (unlikely(IS_ERR(newfile))) { 4366 put_unused_fd(retval); 4367 sock_release(newsock); 4368 return PTR_ERR(newfile); 4369 } 4370 4371 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 4372 retval); 4373 4374 /* Return the fd mapped to the new socket. */ 4375 if (put_user(len, optlen)) { 4376 fput(newfile); 4377 put_unused_fd(retval); 4378 return -EFAULT; 4379 } 4380 peeloff.sd = retval; 4381 if (copy_to_user(optval, &peeloff, len)) { 4382 fput(newfile); 4383 put_unused_fd(retval); 4384 return -EFAULT; 4385 } 4386 fd_install(retval, newfile); 4387 out: 4388 return retval; 4389 } 4390 4391 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 4392 * 4393 * Applications can enable or disable heartbeats for any peer address of 4394 * an association, modify an address's heartbeat interval, force a 4395 * heartbeat to be sent immediately, and adjust the address's maximum 4396 * number of retransmissions sent before an address is considered 4397 * unreachable. 
The following structure is used to access and modify an 4398 * address's parameters: 4399 * 4400 * struct sctp_paddrparams { 4401 * sctp_assoc_t spp_assoc_id; 4402 * struct sockaddr_storage spp_address; 4403 * uint32_t spp_hbinterval; 4404 * uint16_t spp_pathmaxrxt; 4405 * uint32_t spp_pathmtu; 4406 * uint32_t spp_sackdelay; 4407 * uint32_t spp_flags; 4408 * }; 4409 * 4410 * spp_assoc_id - (one-to-many style socket) This is filled in the 4411 * application, and identifies the association for 4412 * this query. 4413 * spp_address - This specifies which address is of interest. 4414 * spp_hbinterval - This contains the value of the heartbeat interval, 4415 * in milliseconds. If a value of zero 4416 * is present in this field then no changes are to 4417 * be made to this parameter. 4418 * spp_pathmaxrxt - This contains the maximum number of 4419 * retransmissions before this address shall be 4420 * considered unreachable. If a value of zero 4421 * is present in this field then no changes are to 4422 * be made to this parameter. 4423 * spp_pathmtu - When Path MTU discovery is disabled the value 4424 * specified here will be the "fixed" path mtu. 4425 * Note that if the spp_address field is empty 4426 * then all associations on this address will 4427 * have this fixed path mtu set upon them. 4428 * 4429 * spp_sackdelay - When delayed sack is enabled, this value specifies 4430 * the number of milliseconds that sacks will be delayed 4431 * for. This value will apply to all addresses of an 4432 * association if the spp_address field is empty. Note 4433 * also, that if delayed sack is enabled and this 4434 * value is set to 0, no change is made to the last 4435 * recorded delayed sack timer value. 4436 * 4437 * spp_flags - These flags are used to control various features 4438 * on an association. The flag field may contain 4439 * zero or more of the following options. 4440 * 4441 * SPP_HB_ENABLE - Enable heartbeats on the 4442 * specified address. Note that if the address 4443 * field is empty all addresses for the association 4444 * have heartbeats enabled upon them. 4445 * 4446 * SPP_HB_DISABLE - Disable heartbeats on the 4447 * speicifed address. Note that if the address 4448 * field is empty all addresses for the association 4449 * will have their heartbeats disabled. Note also 4450 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 4451 * mutually exclusive, only one of these two should 4452 * be specified. Enabling both fields will have 4453 * undetermined results. 4454 * 4455 * SPP_HB_DEMAND - Request a user initiated heartbeat 4456 * to be made immediately. 4457 * 4458 * SPP_PMTUD_ENABLE - This field will enable PMTU 4459 * discovery upon the specified address. Note that 4460 * if the address feild is empty then all addresses 4461 * on the association are effected. 4462 * 4463 * SPP_PMTUD_DISABLE - This field will disable PMTU 4464 * discovery upon the specified address. Note that 4465 * if the address feild is empty then all addresses 4466 * on the association are effected. Not also that 4467 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 4468 * exclusive. Enabling both will have undetermined 4469 * results. 4470 * 4471 * SPP_SACKDELAY_ENABLE - Setting this flag turns 4472 * on delayed sack. The time specified in spp_sackdelay 4473 * is used to specify the sack delay for this address. Note 4474 * that if spp_address is empty then all addresses will 4475 * enable delayed sack and take on the sack delay 4476 * value specified in spp_sackdelay. 
4477 * SPP_SACKDELAY_DISABLE - Setting this flag turns 4478 * off delayed sack. If the spp_address field is blank then 4479 * delayed sack is disabled for the entire association. Note 4480 * also that this field is mutually exclusive to 4481 * SPP_SACKDELAY_ENABLE, setting both will have undefined 4482 * results. 4483 */ 4484 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 4485 char __user *optval, int __user *optlen) 4486 { 4487 struct sctp_paddrparams params; 4488 struct sctp_transport *trans = NULL; 4489 struct sctp_association *asoc = NULL; 4490 struct sctp_sock *sp = sctp_sk(sk); 4491 4492 if (len < sizeof(struct sctp_paddrparams)) 4493 return -EINVAL; 4494 len = sizeof(struct sctp_paddrparams); 4495 if (copy_from_user(&params, optval, len)) 4496 return -EFAULT; 4497 4498 /* If an address other than INADDR_ANY is specified, and 4499 * no transport is found, then the request is invalid. 4500 */ 4501 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 4502 trans = sctp_addr_id2transport(sk, &params.spp_address, 4503 params.spp_assoc_id); 4504 if (!trans) { 4505 pr_debug("%s: failed no transport\n", __func__); 4506 return -EINVAL; 4507 } 4508 } 4509 4510 /* Get association, if assoc_id != 0 and the socket is a one 4511 * to many style socket, and an association was not found, then 4512 * the id was invalid. 4513 */ 4514 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 4515 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 4516 pr_debug("%s: failed no association\n", __func__); 4517 return -EINVAL; 4518 } 4519 4520 if (trans) { 4521 /* Fetch transport values. */ 4522 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 4523 params.spp_pathmtu = trans->pathmtu; 4524 params.spp_pathmaxrxt = trans->pathmaxrxt; 4525 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 4526 4527 /*draft-11 doesn't say what to return in spp_flags*/ 4528 params.spp_flags = trans->param_flags; 4529 } else if (asoc) { 4530 /* Fetch association values. */ 4531 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 4532 params.spp_pathmtu = asoc->pathmtu; 4533 params.spp_pathmaxrxt = asoc->pathmaxrxt; 4534 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 4535 4536 /*draft-11 doesn't say what to return in spp_flags*/ 4537 params.spp_flags = asoc->param_flags; 4538 } else { 4539 /* Fetch socket values. */ 4540 params.spp_hbinterval = sp->hbinterval; 4541 params.spp_pathmtu = sp->pathmtu; 4542 params.spp_sackdelay = sp->sackdelay; 4543 params.spp_pathmaxrxt = sp->pathmaxrxt; 4544 4545 /*draft-11 doesn't say what to return in spp_flags*/ 4546 params.spp_flags = sp->param_flags; 4547 } 4548 4549 if (copy_to_user(optval, &params, len)) 4550 return -EFAULT; 4551 4552 if (put_user(len, optlen)) 4553 return -EFAULT; 4554 4555 return 0; 4556 } 4557 4558 /* 4559 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 4560 * 4561 * This option will affect the way delayed acks are performed. This 4562 * option allows you to get or set the delayed ack time, in 4563 * milliseconds. It also allows changing the delayed ack frequency. 4564 * Changing the frequency to 1 disables the delayed sack algorithm. If 4565 * the assoc_id is 0, then this sets or gets the endpoint's default 4566 * values. If the assoc_id field is non-zero, then the set or get 4567 * affects the specified association for the one to many model (the 4568 * assoc_id field is ignored by the one to one model).
Note that if 4569 * sack_delay or sack_freq are 0 when setting this option, then the 4570 * current values will remain unchanged. 4571 * 4572 * struct sctp_sack_info { 4573 * sctp_assoc_t sack_assoc_id; 4574 * uint32_t sack_delay; 4575 * uint32_t sack_freq; 4576 * }; 4577 * 4578 * sack_assoc_id - This parameter indicates which association the user 4579 * is performing an action upon. Note that if this field's value is 4580 * zero then the endpoint's default value is changed (affecting future 4581 * associations only). 4582 * 4583 * sack_delay - This parameter contains the number of milliseconds that 4584 * the user is requesting the delayed ACK timer be set to. Note that 4585 * this value is defined in the standard to be between 200 and 500 4586 * milliseconds. 4587 * 4588 * sack_freq - This parameter contains the number of packets that must 4589 * be received before a sack is sent without waiting for the delay 4590 * timer to expire. The default value for this is 2, setting this 4591 * value to 1 will disable the delayed sack algorithm. 4592 */ 4593 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 4594 char __user *optval, 4595 int __user *optlen) 4596 { 4597 struct sctp_sack_info params; 4598 struct sctp_association *asoc = NULL; 4599 struct sctp_sock *sp = sctp_sk(sk); 4600 4601 if (len >= sizeof(struct sctp_sack_info)) { 4602 len = sizeof(struct sctp_sack_info); 4603 4604 if (copy_from_user(&params, optval, len)) 4605 return -EFAULT; 4606 } else if (len == sizeof(struct sctp_assoc_value)) { 4607 pr_warn_ratelimited(DEPRECATED 4608 "%s (pid %d) " 4609 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 4610 "Use struct sctp_sack_info instead\n", 4611 current->comm, task_pid_nr(current)); 4612 if (copy_from_user(&params, optval, len)) 4613 return -EFAULT; 4614 } else 4615 return -EINVAL; 4616 4617 /* Get association, if sack_assoc_id != 0 and the socket is a one 4618 * to many style socket, and an association was not found, then 4619 * the id was invalid. 4620 */ 4621 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 4622 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 4623 return -EINVAL; 4624 4625 if (asoc) { 4626 /* Fetch association values. */ 4627 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 4628 params.sack_delay = jiffies_to_msecs( 4629 asoc->sackdelay); 4630 params.sack_freq = asoc->sackfreq; 4631 4632 } else { 4633 params.sack_delay = 0; 4634 params.sack_freq = 1; 4635 } 4636 } else { 4637 /* Fetch socket values. */ 4638 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 4639 params.sack_delay = sp->sackdelay; 4640 params.sack_freq = sp->sackfreq; 4641 } else { 4642 params.sack_delay = 0; 4643 params.sack_freq = 1; 4644 } 4645 } 4646 4647 if (copy_to_user(optval, &params, len)) 4648 return -EFAULT; 4649 4650 if (put_user(len, optlen)) 4651 return -EFAULT; 4652 4653 return 0; 4654 } 4655 4656 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 4657 * 4658 * Applications can specify protocol parameters for the default association 4659 * initialization. The option name argument to setsockopt() and getsockopt() 4660 * is SCTP_INITMSG. 4661 * 4662 * Setting initialization parameters is effective only on an unconnected 4663 * socket (for UDP-style sockets only future associations are affected 4664 * by the change). With TCP-style sockets, this option is inherited by 4665 * sockets derived from a listener socket.
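 *
 * Example (user level, illustrative only; values are arbitrary and sd
 * is a placeholder):
 *
 *   struct sctp_initmsg initmsg;
 *
 *   memset(&initmsg, 0, sizeof(initmsg));
 *   initmsg.sinit_num_ostreams = 5;
 *   initmsg.sinit_max_instreams = 5;
 *   initmsg.sinit_max_attempts = 4;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &initmsg, sizeof(initmsg));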
4666 */ 4667 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 4668 { 4669 if (len < sizeof(struct sctp_initmsg)) 4670 return -EINVAL; 4671 len = sizeof(struct sctp_initmsg); 4672 if (put_user(len, optlen)) 4673 return -EFAULT; 4674 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 4675 return -EFAULT; 4676 return 0; 4677 } 4678 4679 4680 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 4681 char __user *optval, int __user *optlen) 4682 { 4683 struct sctp_association *asoc; 4684 int cnt = 0; 4685 struct sctp_getaddrs getaddrs; 4686 struct sctp_transport *from; 4687 void __user *to; 4688 union sctp_addr temp; 4689 struct sctp_sock *sp = sctp_sk(sk); 4690 int addrlen; 4691 size_t space_left; 4692 int bytes_copied; 4693 4694 if (len < sizeof(struct sctp_getaddrs)) 4695 return -EINVAL; 4696 4697 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4698 return -EFAULT; 4699 4700 /* For UDP-style sockets, id specifies the association to query. */ 4701 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4702 if (!asoc) 4703 return -EINVAL; 4704 4705 to = optval + offsetof(struct sctp_getaddrs, addrs); 4706 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4707 4708 list_for_each_entry(from, &asoc->peer.transport_addr_list, 4709 transports) { 4710 memcpy(&temp, &from->ipaddr, sizeof(temp)); 4711 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4712 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4713 if (space_left < addrlen) 4714 return -ENOMEM; 4715 if (copy_to_user(to, &temp, addrlen)) 4716 return -EFAULT; 4717 to += addrlen; 4718 cnt++; 4719 space_left -= addrlen; 4720 } 4721 4722 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4723 return -EFAULT; 4724 bytes_copied = ((char __user *)to) - optval; 4725 if (put_user(bytes_copied, optlen)) 4726 return -EFAULT; 4727 4728 return 0; 4729 } 4730 4731 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 4732 size_t space_left, int *bytes_copied) 4733 { 4734 struct sctp_sockaddr_entry *addr; 4735 union sctp_addr temp; 4736 int cnt = 0; 4737 int addrlen; 4738 struct net *net = sock_net(sk); 4739 4740 rcu_read_lock(); 4741 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 4742 if (!addr->valid) 4743 continue; 4744 4745 if ((PF_INET == sk->sk_family) && 4746 (AF_INET6 == addr->a.sa.sa_family)) 4747 continue; 4748 if ((PF_INET6 == sk->sk_family) && 4749 inet_v6_ipv6only(sk) && 4750 (AF_INET == addr->a.sa.sa_family)) 4751 continue; 4752 memcpy(&temp, &addr->a, sizeof(temp)); 4753 if (!temp.v4.sin_port) 4754 temp.v4.sin_port = htons(port); 4755 4756 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4757 &temp); 4758 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4759 if (space_left < addrlen) { 4760 cnt = -ENOMEM; 4761 break; 4762 } 4763 memcpy(to, &temp, addrlen); 4764 4765 to += addrlen; 4766 cnt++; 4767 space_left -= addrlen; 4768 *bytes_copied += addrlen; 4769 } 4770 rcu_read_unlock(); 4771 4772 return cnt; 4773 } 4774 4775 4776 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 4777 char __user *optval, int __user *optlen) 4778 { 4779 struct sctp_bind_addr *bp; 4780 struct sctp_association *asoc; 4781 int cnt = 0; 4782 struct sctp_getaddrs getaddrs; 4783 struct sctp_sockaddr_entry *addr; 4784 void __user *to; 4785 union sctp_addr temp; 4786 struct sctp_sock *sp = sctp_sk(sk); 4787 int addrlen; 4788 int err = 0; 4789 size_t space_left; 4790 int 
bytes_copied = 0; 4791 void *addrs; 4792 void *buf; 4793 4794 if (len < sizeof(struct sctp_getaddrs)) 4795 return -EINVAL; 4796 4797 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4798 return -EFAULT; 4799 4800 /* 4801 * For UDP-style sockets, id specifies the association to query. 4802 * If the id field is set to the value '0' then the locally bound 4803 * addresses are returned without regard to any particular 4804 * association. 4805 */ 4806 if (0 == getaddrs.assoc_id) { 4807 bp = &sctp_sk(sk)->ep->base.bind_addr; 4808 } else { 4809 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4810 if (!asoc) 4811 return -EINVAL; 4812 bp = &asoc->base.bind_addr; 4813 } 4814 4815 to = optval + offsetof(struct sctp_getaddrs, addrs); 4816 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4817 4818 addrs = kmalloc(space_left, GFP_KERNEL); 4819 if (!addrs) 4820 return -ENOMEM; 4821 4822 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4823 * addresses from the global local address list. 4824 */ 4825 if (sctp_list_single_entry(&bp->address_list)) { 4826 addr = list_entry(bp->address_list.next, 4827 struct sctp_sockaddr_entry, list); 4828 if (sctp_is_any(sk, &addr->a)) { 4829 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 4830 space_left, &bytes_copied); 4831 if (cnt < 0) { 4832 err = cnt; 4833 goto out; 4834 } 4835 goto copy_getaddrs; 4836 } 4837 } 4838 4839 buf = addrs; 4840 /* Protection on the bound address list is not needed since 4841 * in the socket option context we hold a socket lock and 4842 * thus the bound address list can't change. 4843 */ 4844 list_for_each_entry(addr, &bp->address_list, list) { 4845 memcpy(&temp, &addr->a, sizeof(temp)); 4846 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4847 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4848 if (space_left < addrlen) { 4849 err = -ENOMEM; /*fixme: right error?*/ 4850 goto out; 4851 } 4852 memcpy(buf, &temp, addrlen); 4853 buf += addrlen; 4854 bytes_copied += addrlen; 4855 cnt++; 4856 space_left -= addrlen; 4857 } 4858 4859 copy_getaddrs: 4860 if (copy_to_user(to, addrs, bytes_copied)) { 4861 err = -EFAULT; 4862 goto out; 4863 } 4864 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 4865 err = -EFAULT; 4866 goto out; 4867 } 4868 if (put_user(bytes_copied, optlen)) 4869 err = -EFAULT; 4870 out: 4871 kfree(addrs); 4872 return err; 4873 } 4874 4875 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 4876 * 4877 * Requests that the local SCTP stack use the enclosed peer address as 4878 * the association primary. The enclosed address must be one of the 4879 * association peer's addresses. 
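 *
 * Example (user level, illustrative only; sd, assoc_id and peer_addr
 * are placeholders, and peer_addr must already be one of the peer's
 * addresses):
 *
 *   struct sctp_prim prim;
 *   socklen_t optlen = sizeof(prim);
 *
 *   memset(&prim, 0, sizeof(prim));
 *   prim.ssp_assoc_id = assoc_id;
 *   memcpy(&prim.ssp_addr, &peer_addr, sizeof(peer_addr));
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));
 *
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, &optlen);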
4880 */ 4881 static int sctp_getsockopt_primary_addr(struct sock *sk, int len, 4882 char __user *optval, int __user *optlen) 4883 { 4884 struct sctp_prim prim; 4885 struct sctp_association *asoc; 4886 struct sctp_sock *sp = sctp_sk(sk); 4887 4888 if (len < sizeof(struct sctp_prim)) 4889 return -EINVAL; 4890 4891 len = sizeof(struct sctp_prim); 4892 4893 if (copy_from_user(&prim, optval, len)) 4894 return -EFAULT; 4895 4896 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4897 if (!asoc) 4898 return -EINVAL; 4899 4900 if (!asoc->peer.primary_path) 4901 return -ENOTCONN; 4902 4903 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, 4904 asoc->peer.primary_path->af_specific->sockaddr_len); 4905 4906 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4907 (union sctp_addr *)&prim.ssp_addr); 4908 4909 if (put_user(len, optlen)) 4910 return -EFAULT; 4911 if (copy_to_user(optval, &prim, len)) 4912 return -EFAULT; 4913 4914 return 0; 4915 } 4916 4917 /* 4918 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) 4919 * 4920 * Requests that the local endpoint set the specified Adaptation Layer 4921 * Indication parameter for all future INIT and INIT-ACK exchanges. 4922 */ 4923 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, 4924 char __user *optval, int __user *optlen) 4925 { 4926 struct sctp_setadaptation adaptation; 4927 4928 if (len < sizeof(struct sctp_setadaptation)) 4929 return -EINVAL; 4930 4931 len = sizeof(struct sctp_setadaptation); 4932 4933 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4934 4935 if (put_user(len, optlen)) 4936 return -EFAULT; 4937 if (copy_to_user(optval, &adaptation, len)) 4938 return -EFAULT; 4939 4940 return 0; 4941 } 4942 4943 /* 4944 * 4945 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 4946 * 4947 * Applications that wish to use the sendto() system call may wish to 4948 * specify a default set of parameters that would normally be supplied 4949 * through the inclusion of ancillary data. This socket option allows 4950 * such an application to set the default sctp_sndrcvinfo structure. 4951 4952 4953 * The application that wishes to use this socket option simply passes 4954 * in to this call the sctp_sndrcvinfo structure defined in Section 4955 * 5.2.2) The input parameters accepted by this call include 4956 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 4957 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 4958 * to this call if the caller is using the UDP model. 4959 * 4960 * For getsockopt, it get the default sctp_sndrcvinfo structure. 
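 *
 * Example (user level, illustrative only; sd is a placeholder, the
 * values are arbitrary, and sinfo_assoc_id of 0 changes the
 * socket-wide defaults on a one-to-many socket):
 *
 *   struct sctp_sndrcvinfo info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.sinfo_stream = 1;
 *   info.sinfo_ppid = htonl(39);
 *   info.sinfo_assoc_id = 0;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *              &info, sizeof(info));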
4961 */ 4962 static int sctp_getsockopt_default_send_param(struct sock *sk, 4963 int len, char __user *optval, 4964 int __user *optlen) 4965 { 4966 struct sctp_sndrcvinfo info; 4967 struct sctp_association *asoc; 4968 struct sctp_sock *sp = sctp_sk(sk); 4969 4970 if (len < sizeof(struct sctp_sndrcvinfo)) 4971 return -EINVAL; 4972 4973 len = sizeof(struct sctp_sndrcvinfo); 4974 4975 if (copy_from_user(&info, optval, len)) 4976 return -EFAULT; 4977 4978 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4979 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 4980 return -EINVAL; 4981 4982 if (asoc) { 4983 info.sinfo_stream = asoc->default_stream; 4984 info.sinfo_flags = asoc->default_flags; 4985 info.sinfo_ppid = asoc->default_ppid; 4986 info.sinfo_context = asoc->default_context; 4987 info.sinfo_timetolive = asoc->default_timetolive; 4988 } else { 4989 info.sinfo_stream = sp->default_stream; 4990 info.sinfo_flags = sp->default_flags; 4991 info.sinfo_ppid = sp->default_ppid; 4992 info.sinfo_context = sp->default_context; 4993 info.sinfo_timetolive = sp->default_timetolive; 4994 } 4995 4996 if (put_user(len, optlen)) 4997 return -EFAULT; 4998 if (copy_to_user(optval, &info, len)) 4999 return -EFAULT; 5000 5001 return 0; 5002 } 5003 5004 /* 5005 * 5006 * 7.1.5 SCTP_NODELAY 5007 * 5008 * Turn on/off any Nagle-like algorithm. This means that packets are 5009 * generally sent as soon as possible and no unnecessary delays are 5010 * introduced, at the cost of more packets in the network. Expects an 5011 * integer boolean flag. 5012 */ 5013 5014 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5015 char __user *optval, int __user *optlen) 5016 { 5017 int val; 5018 5019 if (len < sizeof(int)) 5020 return -EINVAL; 5021 5022 len = sizeof(int); 5023 val = (sctp_sk(sk)->nodelay == 1); 5024 if (put_user(len, optlen)) 5025 return -EFAULT; 5026 if (copy_to_user(optval, &val, len)) 5027 return -EFAULT; 5028 return 0; 5029 } 5030 5031 /* 5032 * 5033 * 7.1.1 SCTP_RTOINFO 5034 * 5035 * The protocol parameters used to initialize and bound retransmission 5036 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5037 * and modify these parameters. 5038 * All parameters are time values, in milliseconds. A value of 0, when 5039 * modifying the parameters, indicates that the current value should not 5040 * be changed. 5041 * 5042 */ 5043 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5044 char __user *optval, 5045 int __user *optlen) { 5046 struct sctp_rtoinfo rtoinfo; 5047 struct sctp_association *asoc; 5048 5049 if (len < sizeof (struct sctp_rtoinfo)) 5050 return -EINVAL; 5051 5052 len = sizeof(struct sctp_rtoinfo); 5053 5054 if (copy_from_user(&rtoinfo, optval, len)) 5055 return -EFAULT; 5056 5057 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5058 5059 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5060 return -EINVAL; 5061 5062 /* Values corresponding to the specific association. */ 5063 if (asoc) { 5064 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5065 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5066 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5067 } else { 5068 /* Values corresponding to the endpoint. 
*/ 5069 struct sctp_sock *sp = sctp_sk(sk); 5070 5071 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5072 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5073 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5074 } 5075 5076 if (put_user(len, optlen)) 5077 return -EFAULT; 5078 5079 if (copy_to_user(optval, &rtoinfo, len)) 5080 return -EFAULT; 5081 5082 return 0; 5083 } 5084 5085 /* 5086 * 5087 * 7.1.2 SCTP_ASSOCINFO 5088 * 5089 * This option is used to tune the maximum retransmission attempts 5090 * of the association. 5091 * Returns an error if the new association retransmission value is 5092 * greater than the sum of the retransmission value of the peer. 5093 * See [SCTP] for more information. 5094 * 5095 */ 5096 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5097 char __user *optval, 5098 int __user *optlen) 5099 { 5100 5101 struct sctp_assocparams assocparams; 5102 struct sctp_association *asoc; 5103 struct list_head *pos; 5104 int cnt = 0; 5105 5106 if (len < sizeof (struct sctp_assocparams)) 5107 return -EINVAL; 5108 5109 len = sizeof(struct sctp_assocparams); 5110 5111 if (copy_from_user(&assocparams, optval, len)) 5112 return -EFAULT; 5113 5114 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5115 5116 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5117 return -EINVAL; 5118 5119 /* Values correspoinding to the specific association */ 5120 if (asoc) { 5121 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 5122 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 5123 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 5124 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 5125 5126 list_for_each(pos, &asoc->peer.transport_addr_list) { 5127 cnt++; 5128 } 5129 5130 assocparams.sasoc_number_peer_destinations = cnt; 5131 } else { 5132 /* Values corresponding to the endpoint */ 5133 struct sctp_sock *sp = sctp_sk(sk); 5134 5135 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 5136 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 5137 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 5138 assocparams.sasoc_cookie_life = 5139 sp->assocparams.sasoc_cookie_life; 5140 assocparams.sasoc_number_peer_destinations = 5141 sp->assocparams. 5142 sasoc_number_peer_destinations; 5143 } 5144 5145 if (put_user(len, optlen)) 5146 return -EFAULT; 5147 5148 if (copy_to_user(optval, &assocparams, len)) 5149 return -EFAULT; 5150 5151 return 0; 5152 } 5153 5154 /* 5155 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 5156 * 5157 * This socket option is a boolean flag which turns on or off mapped V4 5158 * addresses. If this option is turned on and the socket is type 5159 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 5160 * If this option is turned off, then no mapping will be done of V4 5161 * addresses and a user will receive both PF_INET6 and PF_INET type 5162 * addresses on the socket. 5163 */ 5164 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 5165 char __user *optval, int __user *optlen) 5166 { 5167 int val; 5168 struct sctp_sock *sp = sctp_sk(sk); 5169 5170 if (len < sizeof(int)) 5171 return -EINVAL; 5172 5173 len = sizeof(int); 5174 val = sp->v4mapped; 5175 if (put_user(len, optlen)) 5176 return -EFAULT; 5177 if (copy_to_user(optval, &val, len)) 5178 return -EFAULT; 5179 5180 return 0; 5181 } 5182 5183 /* 5184 * 7.1.29. 
Set or Get the default context (SCTP_CONTEXT) 5185 * (chapter and verse is quoted at sctp_setsockopt_context()) 5186 */ 5187 static int sctp_getsockopt_context(struct sock *sk, int len, 5188 char __user *optval, int __user *optlen) 5189 { 5190 struct sctp_assoc_value params; 5191 struct sctp_sock *sp; 5192 struct sctp_association *asoc; 5193 5194 if (len < sizeof(struct sctp_assoc_value)) 5195 return -EINVAL; 5196 5197 len = sizeof(struct sctp_assoc_value); 5198 5199 if (copy_from_user(&params, optval, len)) 5200 return -EFAULT; 5201 5202 sp = sctp_sk(sk); 5203 5204 if (params.assoc_id != 0) { 5205 asoc = sctp_id2assoc(sk, params.assoc_id); 5206 if (!asoc) 5207 return -EINVAL; 5208 params.assoc_value = asoc->default_rcv_context; 5209 } else { 5210 params.assoc_value = sp->default_rcv_context; 5211 } 5212 5213 if (put_user(len, optlen)) 5214 return -EFAULT; 5215 if (copy_to_user(optval, &params, len)) 5216 return -EFAULT; 5217 5218 return 0; 5219 } 5220 5221 /* 5222 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 5223 * This option will get or set the maximum size to put in any outgoing 5224 * SCTP DATA chunk. If a message is larger than this size it will be 5225 * fragmented by SCTP into the specified size. Note that the underlying 5226 * SCTP implementation may fragment into smaller sized chunks when the 5227 * PMTU of the underlying association is smaller than the value set by 5228 * the user. The default value for this option is '0' which indicates 5229 * the user is NOT limiting fragmentation and only the PMTU will affect 5230 * SCTP's choice of DATA chunk size. Note also that values set larger 5231 * than the maximum size of an IP datagram will effectively let SCTP 5232 * control fragmentation (i.e. the same as setting this option to 0). 5233 * 5234 * The following structure is used to access and modify this parameter: 5235 * 5236 * struct sctp_assoc_value { 5237 * sctp_assoc_t assoc_id; 5238 * uint32_t assoc_value; 5239 * }; 5240 * 5241 * assoc_id: This parameter is ignored for one-to-one style sockets. 5242 * For one-to-many style sockets this parameter indicates which 5243 * association the user is performing an action upon. Note that if 5244 * this field's value is zero then the endpoint's default value is 5245 * changed (affecting future associations only). 5246 * assoc_value: This parameter specifies the maximum size in bytes.
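 *
 * Example (user level, illustrative only; sd is a placeholder,
 * assoc_id 0 changes the endpoint default, and 1400 is an arbitrary
 * limit for outgoing DATA chunks):
 *
 *   struct sctp_assoc_value av;
 *   socklen_t optlen = sizeof(av);
 *
 *   memset(&av, 0, sizeof(av));
 *   av.assoc_id = 0;
 *   av.assoc_value = 1400;
 *   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *   getsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen);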
5247 */ 5248 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 5249 char __user *optval, int __user *optlen) 5250 { 5251 struct sctp_assoc_value params; 5252 struct sctp_association *asoc; 5253 5254 if (len == sizeof(int)) { 5255 pr_warn_ratelimited(DEPRECATED 5256 "%s (pid %d) " 5257 "Use of int in maxseg socket option.\n" 5258 "Use struct sctp_assoc_value instead\n", 5259 current->comm, task_pid_nr(current)); 5260 params.assoc_id = 0; 5261 } else if (len >= sizeof(struct sctp_assoc_value)) { 5262 len = sizeof(struct sctp_assoc_value); 5263 if (copy_from_user(&params, optval, sizeof(params))) 5264 return -EFAULT; 5265 } else 5266 return -EINVAL; 5267 5268 asoc = sctp_id2assoc(sk, params.assoc_id); 5269 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 5270 return -EINVAL; 5271 5272 if (asoc) 5273 params.assoc_value = asoc->frag_point; 5274 else 5275 params.assoc_value = sctp_sk(sk)->user_frag; 5276 5277 if (put_user(len, optlen)) 5278 return -EFAULT; 5279 if (len == sizeof(int)) { 5280 if (copy_to_user(optval, &params.assoc_value, len)) 5281 return -EFAULT; 5282 } else { 5283 if (copy_to_user(optval, &params, len)) 5284 return -EFAULT; 5285 } 5286 5287 return 0; 5288 } 5289 5290 /* 5291 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 5292 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 5293 */ 5294 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 5295 char __user *optval, int __user *optlen) 5296 { 5297 int val; 5298 5299 if (len < sizeof(int)) 5300 return -EINVAL; 5301 5302 len = sizeof(int); 5303 5304 val = sctp_sk(sk)->frag_interleave; 5305 if (put_user(len, optlen)) 5306 return -EFAULT; 5307 if (copy_to_user(optval, &val, len)) 5308 return -EFAULT; 5309 5310 return 0; 5311 } 5312 5313 /* 5314 * 7.1.25. Set or Get the sctp partial delivery point 5315 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 5316 */ 5317 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 5318 char __user *optval, 5319 int __user *optlen) 5320 { 5321 u32 val; 5322 5323 if (len < sizeof(u32)) 5324 return -EINVAL; 5325 5326 len = sizeof(u32); 5327 5328 val = sctp_sk(sk)->pd_point; 5329 if (put_user(len, optlen)) 5330 return -EFAULT; 5331 if (copy_to_user(optval, &val, len)) 5332 return -EFAULT; 5333 5334 return 0; 5335 } 5336 5337 /* 5338 * 7.1.28.
Set or Get the maximum burst (SCTP_MAX_BURST) 5339 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 5340 */ 5341 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 5342 char __user *optval, 5343 int __user *optlen) 5344 { 5345 struct sctp_assoc_value params; 5346 struct sctp_sock *sp; 5347 struct sctp_association *asoc; 5348 5349 if (len == sizeof(int)) { 5350 pr_warn_ratelimited(DEPRECATED 5351 "%s (pid %d) " 5352 "Use of int in max_burst socket option.\n" 5353 "Use struct sctp_assoc_value instead\n", 5354 current->comm, task_pid_nr(current)); 5355 params.assoc_id = 0; 5356 } else if (len >= sizeof(struct sctp_assoc_value)) { 5357 len = sizeof(struct sctp_assoc_value); 5358 if (copy_from_user(&params, optval, len)) 5359 return -EFAULT; 5360 } else 5361 return -EINVAL; 5362 5363 sp = sctp_sk(sk); 5364 5365 if (params.assoc_id != 0) { 5366 asoc = sctp_id2assoc(sk, params.assoc_id); 5367 if (!asoc) 5368 return -EINVAL; 5369 params.assoc_value = asoc->max_burst; 5370 } else 5371 params.assoc_value = sp->max_burst; 5372 5373 if (len == sizeof(int)) { 5374 if (copy_to_user(optval, &params.assoc_value, len)) 5375 return -EFAULT; 5376 } else { 5377 if (copy_to_user(optval, &params, len)) 5378 return -EFAULT; 5379 } 5380 5381 return 0; 5382 5383 } 5384 5385 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5386 char __user *optval, int __user *optlen) 5387 { 5388 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5389 struct sctp_hmacalgo __user *p = (void __user *)optval; 5390 struct sctp_hmac_algo_param *hmacs; 5391 __u16 data_len = 0; 5392 u32 num_idents; 5393 5394 if (!ep->auth_enable) 5395 return -EACCES; 5396 5397 hmacs = ep->auth_hmacs_list; 5398 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); 5399 5400 if (len < sizeof(struct sctp_hmacalgo) + data_len) 5401 return -EINVAL; 5402 5403 len = sizeof(struct sctp_hmacalgo) + data_len; 5404 num_idents = data_len / sizeof(u16); 5405 5406 if (put_user(len, optlen)) 5407 return -EFAULT; 5408 if (put_user(num_idents, &p->shmac_num_idents)) 5409 return -EFAULT; 5410 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5411 return -EFAULT; 5412 return 0; 5413 } 5414 5415 static int sctp_getsockopt_active_key(struct sock *sk, int len, 5416 char __user *optval, int __user *optlen) 5417 { 5418 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5419 struct sctp_authkeyid val; 5420 struct sctp_association *asoc; 5421 5422 if (!ep->auth_enable) 5423 return -EACCES; 5424 5425 if (len < sizeof(struct sctp_authkeyid)) 5426 return -EINVAL; 5427 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 5428 return -EFAULT; 5429 5430 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 5431 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 5432 return -EINVAL; 5433 5434 if (asoc) 5435 val.scact_keynumber = asoc->active_key_id; 5436 else 5437 val.scact_keynumber = ep->active_key_id; 5438 5439 len = sizeof(struct sctp_authkeyid); 5440 if (put_user(len, optlen)) 5441 return -EFAULT; 5442 if (copy_to_user(optval, &val, len)) 5443 return -EFAULT; 5444 5445 return 0; 5446 } 5447 5448 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5449 char __user *optval, int __user *optlen) 5450 { 5451 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5452 struct sctp_authchunks __user *p = (void __user *)optval; 5453 struct sctp_authchunks val; 5454 struct sctp_association *asoc; 5455 struct sctp_chunks_param *ch; 5456 u32 num_chunks = 0; 5457 char __user *to; 5458 5459 if (!ep->auth_enable) 5460 return -EACCES; 5461
5462 if (len < sizeof(struct sctp_authchunks)) 5463 return -EINVAL; 5464 5465 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5466 return -EFAULT; 5467 5468 to = p->gauth_chunks; 5469 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5470 if (!asoc) 5471 return -EINVAL; 5472 5473 ch = asoc->peer.peer_chunks; 5474 if (!ch) 5475 goto num; 5476 5477 /* See if the user provided enough room for all the data */ 5478 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5479 if (len < num_chunks) 5480 return -EINVAL; 5481 5482 if (copy_to_user(to, ch->chunks, num_chunks)) 5483 return -EFAULT; 5484 num: 5485 len = sizeof(struct sctp_authchunks) + num_chunks; 5486 if (put_user(len, optlen)) 5487 return -EFAULT; 5488 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5489 return -EFAULT; 5490 return 0; 5491 } 5492 5493 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5494 char __user *optval, int __user *optlen) 5495 { 5496 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 5497 struct sctp_authchunks __user *p = (void __user *)optval; 5498 struct sctp_authchunks val; 5499 struct sctp_association *asoc; 5500 struct sctp_chunks_param *ch; 5501 u32 num_chunks = 0; 5502 char __user *to; 5503 5504 if (!ep->auth_enable) 5505 return -EACCES; 5506 5507 if (len < sizeof(struct sctp_authchunks)) 5508 return -EINVAL; 5509 5510 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 5511 return -EFAULT; 5512 5513 to = p->gauth_chunks; 5514 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 5515 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 5516 return -EINVAL; 5517 5518 if (asoc) 5519 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 5520 else 5521 ch = ep->auth_chunk_list; 5522 5523 if (!ch) 5524 goto num; 5525 5526 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); 5527 if (len < sizeof(struct sctp_authchunks) + num_chunks) 5528 return -EINVAL; 5529 5530 if (copy_to_user(to, ch->chunks, num_chunks)) 5531 return -EFAULT; 5532 num: 5533 len = sizeof(struct sctp_authchunks) + num_chunks; 5534 if (put_user(len, optlen)) 5535 return -EFAULT; 5536 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 5537 return -EFAULT; 5538 5539 return 0; 5540 } 5541 5542 /* 5543 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 5544 * This option gets the current number of associations that are attached 5545 * to a one-to-many style socket. The option value is an uint32_t. 
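 *
 * A minimal user-space sketch (illustrative only, not part of this
 * file), assuming a one-to-many style socket descriptor 'fd':
 *
 *	uint32_t assocs;
 *	socklen_t len = sizeof(assocs);
 *
 *	if (!getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER, &assocs, &len))
 *		printf("%u associations\n", assocs);
 *
 * On a one-to-one (TCP) style socket the call fails with EOPNOTSUPP,
 * matching the check below.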
5546 */ 5547 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 5548 char __user *optval, int __user *optlen) 5549 { 5550 struct sctp_sock *sp = sctp_sk(sk); 5551 struct sctp_association *asoc; 5552 u32 val = 0; 5553 5554 if (sctp_style(sk, TCP)) 5555 return -EOPNOTSUPP; 5556 5557 if (len < sizeof(u32)) 5558 return -EINVAL; 5559 5560 len = sizeof(u32); 5561 5562 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5563 val++; 5564 } 5565 5566 if (put_user(len, optlen)) 5567 return -EFAULT; 5568 if (copy_to_user(optval, &val, len)) 5569 return -EFAULT; 5570 5571 return 0; 5572 } 5573 5574 /* 5575 * 8.1.23 SCTP_AUTO_ASCONF 5576 * See the corresponding setsockopt entry as description 5577 */ 5578 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 5579 char __user *optval, int __user *optlen) 5580 { 5581 int val = 0; 5582 5583 if (len < sizeof(int)) 5584 return -EINVAL; 5585 5586 len = sizeof(int); 5587 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 5588 val = 1; 5589 if (put_user(len, optlen)) 5590 return -EFAULT; 5591 if (copy_to_user(optval, &val, len)) 5592 return -EFAULT; 5593 return 0; 5594 } 5595 5596 /* 5597 * 8.2.6. Get the Current Identifiers of Associations 5598 * (SCTP_GET_ASSOC_ID_LIST) 5599 * 5600 * This option gets the current list of SCTP association identifiers of 5601 * the SCTP associations handled by a one-to-many style socket. 5602 */ 5603 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 5604 char __user *optval, int __user *optlen) 5605 { 5606 struct sctp_sock *sp = sctp_sk(sk); 5607 struct sctp_association *asoc; 5608 struct sctp_assoc_ids *ids; 5609 u32 num = 0; 5610 5611 if (sctp_style(sk, TCP)) 5612 return -EOPNOTSUPP; 5613 5614 if (len < sizeof(struct sctp_assoc_ids)) 5615 return -EINVAL; 5616 5617 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5618 num++; 5619 } 5620 5621 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 5622 return -EINVAL; 5623 5624 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 5625 5626 ids = kmalloc(len, GFP_KERNEL); 5627 if (unlikely(!ids)) 5628 return -ENOMEM; 5629 5630 ids->gaids_number_of_ids = num; 5631 num = 0; 5632 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 5633 ids->gaids_assoc_id[num++] = asoc->assoc_id; 5634 } 5635 5636 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 5637 kfree(ids); 5638 return -EFAULT; 5639 } 5640 5641 kfree(ids); 5642 return 0; 5643 } 5644 5645 /* 5646 * SCTP_PEER_ADDR_THLDS 5647 * 5648 * This option allows us to fetch the partially failed threshold for one or all 5649 * transports in an association. 
See Section 6.1 of: 5650 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 5651 */ 5652 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 5653 char __user *optval, 5654 int len, 5655 int __user *optlen) 5656 { 5657 struct sctp_paddrthlds val; 5658 struct sctp_transport *trans; 5659 struct sctp_association *asoc; 5660 5661 if (len < sizeof(struct sctp_paddrthlds)) 5662 return -EINVAL; 5663 len = sizeof(struct sctp_paddrthlds); 5664 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 5665 return -EFAULT; 5666 5667 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 5668 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 5669 if (!asoc) 5670 return -ENOENT; 5671 5672 val.spt_pathpfthld = asoc->pf_retrans; 5673 val.spt_pathmaxrxt = asoc->pathmaxrxt; 5674 } else { 5675 trans = sctp_addr_id2transport(sk, &val.spt_address, 5676 val.spt_assoc_id); 5677 if (!trans) 5678 return -ENOENT; 5679 5680 val.spt_pathmaxrxt = trans->pathmaxrxt; 5681 val.spt_pathpfthld = trans->pf_retrans; 5682 } 5683 5684 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 5685 return -EFAULT; 5686 5687 return 0; 5688 } 5689 5690 /* 5691 * SCTP_GET_ASSOC_STATS 5692 * 5693 * This option retrieves local per endpoint statistics. It is modeled 5694 * after OpenSolaris' implementation 5695 */ 5696 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 5697 char __user *optval, 5698 int __user *optlen) 5699 { 5700 struct sctp_assoc_stats sas; 5701 struct sctp_association *asoc = NULL; 5702 5703 /* User must provide at least the assoc id */ 5704 if (len < sizeof(sctp_assoc_t)) 5705 return -EINVAL; 5706 5707 /* Allow the struct to grow and fill in as much as possible */ 5708 len = min_t(size_t, len, sizeof(sas)); 5709 5710 if (copy_from_user(&sas, optval, len)) 5711 return -EFAULT; 5712 5713 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 5714 if (!asoc) 5715 return -EINVAL; 5716 5717 sas.sas_rtxchunks = asoc->stats.rtxchunks; 5718 sas.sas_gapcnt = asoc->stats.gapcnt; 5719 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 5720 sas.sas_osacks = asoc->stats.osacks; 5721 sas.sas_isacks = asoc->stats.isacks; 5722 sas.sas_octrlchunks = asoc->stats.octrlchunks; 5723 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 5724 sas.sas_oodchunks = asoc->stats.oodchunks; 5725 sas.sas_iodchunks = asoc->stats.iodchunks; 5726 sas.sas_ouodchunks = asoc->stats.ouodchunks; 5727 sas.sas_iuodchunks = asoc->stats.iuodchunks; 5728 sas.sas_idupchunks = asoc->stats.idupchunks; 5729 sas.sas_opackets = asoc->stats.opackets; 5730 sas.sas_ipackets = asoc->stats.ipackets; 5731 5732 /* New high max rto observed, will return 0 if not a single 5733 * RTO update took place. 
obs_rto_ipaddr will be bogus 5734 * in such a case 5735 */ 5736 sas.sas_maxrto = asoc->stats.max_obs_rto; 5737 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 5738 sizeof(struct sockaddr_storage)); 5739 5740 /* Mark beginning of a new observation period */ 5741 asoc->stats.max_obs_rto = asoc->rto_min; 5742 5743 if (put_user(len, optlen)) 5744 return -EFAULT; 5745 5746 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 5747 5748 if (copy_to_user(optval, &sas, len)) 5749 return -EFAULT; 5750 5751 return 0; 5752 } 5753 5754 static int sctp_getsockopt(struct sock *sk, int level, int optname, 5755 char __user *optval, int __user *optlen) 5756 { 5757 int retval = 0; 5758 int len; 5759 5760 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 5761 5762 /* I can hardly begin to describe how wrong this is. This is 5763 * so broken as to be worse than useless. The API draft 5764 * REALLY is NOT helpful here... I am not convinced that the 5765 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 5766 * are at all well-founded. 5767 */ 5768 if (level != SOL_SCTP) { 5769 struct sctp_af *af = sctp_sk(sk)->pf->af; 5770 5771 retval = af->getsockopt(sk, level, optname, optval, optlen); 5772 return retval; 5773 } 5774 5775 if (get_user(len, optlen)) 5776 return -EFAULT; 5777 5778 lock_sock(sk); 5779 5780 switch (optname) { 5781 case SCTP_STATUS: 5782 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 5783 break; 5784 case SCTP_DISABLE_FRAGMENTS: 5785 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 5786 optlen); 5787 break; 5788 case SCTP_EVENTS: 5789 retval = sctp_getsockopt_events(sk, len, optval, optlen); 5790 break; 5791 case SCTP_AUTOCLOSE: 5792 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 5793 break; 5794 case SCTP_SOCKOPT_PEELOFF: 5795 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 5796 break; 5797 case SCTP_PEER_ADDR_PARAMS: 5798 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5799 optlen); 5800 break; 5801 case SCTP_DELAYED_SACK: 5802 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 5803 optlen); 5804 break; 5805 case SCTP_INITMSG: 5806 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 5807 break; 5808 case SCTP_GET_PEER_ADDRS: 5809 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 5810 optlen); 5811 break; 5812 case SCTP_GET_LOCAL_ADDRS: 5813 retval = sctp_getsockopt_local_addrs(sk, len, optval, 5814 optlen); 5815 break; 5816 case SCTP_SOCKOPT_CONNECTX3: 5817 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 5818 break; 5819 case SCTP_DEFAULT_SEND_PARAM: 5820 retval = sctp_getsockopt_default_send_param(sk, len, 5821 optval, optlen); 5822 break; 5823 case SCTP_PRIMARY_ADDR: 5824 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 5825 break; 5826 case SCTP_NODELAY: 5827 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 5828 break; 5829 case SCTP_RTOINFO: 5830 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 5831 break; 5832 case SCTP_ASSOCINFO: 5833 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 5834 break; 5835 case SCTP_I_WANT_MAPPED_V4_ADDR: 5836 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 5837 break; 5838 case SCTP_MAXSEG: 5839 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 5840 break; 5841 case SCTP_GET_PEER_ADDR_INFO: 5842 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 5843 optlen); 5844 break; 5845 case SCTP_ADAPTATION_LAYER: 5846 retval = 
sctp_getsockopt_adaptation_layer(sk, len, optval, 5847 optlen); 5848 break; 5849 case SCTP_CONTEXT: 5850 retval = sctp_getsockopt_context(sk, len, optval, optlen); 5851 break; 5852 case SCTP_FRAGMENT_INTERLEAVE: 5853 retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 5854 optlen); 5855 break; 5856 case SCTP_PARTIAL_DELIVERY_POINT: 5857 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 5858 optlen); 5859 break; 5860 case SCTP_MAX_BURST: 5861 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 5862 break; 5863 case SCTP_AUTH_KEY: 5864 case SCTP_AUTH_CHUNK: 5865 case SCTP_AUTH_DELETE_KEY: 5866 retval = -EOPNOTSUPP; 5867 break; 5868 case SCTP_HMAC_IDENT: 5869 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 5870 break; 5871 case SCTP_AUTH_ACTIVE_KEY: 5872 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 5873 break; 5874 case SCTP_PEER_AUTH_CHUNKS: 5875 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 5876 optlen); 5877 break; 5878 case SCTP_LOCAL_AUTH_CHUNKS: 5879 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 5880 optlen); 5881 break; 5882 case SCTP_GET_ASSOC_NUMBER: 5883 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5884 break; 5885 case SCTP_GET_ASSOC_ID_LIST: 5886 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5887 break; 5888 case SCTP_AUTO_ASCONF: 5889 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 5890 break; 5891 case SCTP_PEER_ADDR_THLDS: 5892 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5893 break; 5894 case SCTP_GET_ASSOC_STATS: 5895 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 5896 break; 5897 default: 5898 retval = -ENOPROTOOPT; 5899 break; 5900 } 5901 5902 release_sock(sk); 5903 return retval; 5904 } 5905 5906 static void sctp_hash(struct sock *sk) 5907 { 5908 /* STUB */ 5909 } 5910 5911 static void sctp_unhash(struct sock *sk) 5912 { 5913 /* STUB */ 5914 } 5915 5916 /* Check if port is acceptable. Possibly find first available port. 5917 * 5918 * The port hash table (contained in the 'global' SCTP protocol storage 5919 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 5920 * table is an array of 4096 lists (sctp_bind_hashbucket). Each 5921 * list (the list number is the port number hashed out, so as you 5922 * would expect from a hash function, all the ports in a given list have 5923 * such a number that hashes out to the same list number; you were 5924 * expecting that, right?); so each list has a set of ports, with a 5925 * link to the socket (struct sock) that uses it, the port number and 5926 * a fastreuse flag (FIXME: NPI ipg). 5927 */ 5928 static struct sctp_bind_bucket *sctp_bucket_create( 5929 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 5930 5931 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5932 { 5933 struct sctp_bind_hashbucket *head; /* hash list */ 5934 struct sctp_bind_bucket *pp; 5935 unsigned short snum; 5936 int ret; 5937 5938 snum = ntohs(addr->v4.sin_port); 5939 5940 pr_debug("%s: begins, snum:%d\n", __func__, snum); 5941 5942 local_bh_disable(); 5943 5944 if (snum == 0) { 5945 /* Search for an available port. 
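 * The search mirrors the usual ephemeral port selection: start at a
 * pseudo-random offset within the local port range, probe linearly
 * (wrapping at the top of the range and skipping reserved ports),
 * and stop at the first port whose hash bucket holds no entry for
 * this port/namespace pair.  With a typical default range of
 * 32768..60999 that is at most high - low + 1 = 28232 candidates
 * before the bind attempt is reported as failed.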
*/ 5946 int low, high, remaining, index; 5947 unsigned int rover; 5948 5949 inet_get_local_port_range(sock_net(sk), &low, &high); 5950 remaining = (high - low) + 1; 5951 rover = prandom_u32() % remaining + low; 5952 5953 do { 5954 rover++; 5955 if ((rover < low) || (rover > high)) 5956 rover = low; 5957 if (inet_is_reserved_local_port(rover)) 5958 continue; 5959 index = sctp_phashfn(sock_net(sk), rover); 5960 head = &sctp_port_hashtable[index]; 5961 spin_lock(&head->lock); 5962 sctp_for_each_hentry(pp, &head->chain) 5963 if ((pp->port == rover) && 5964 net_eq(sock_net(sk), pp->net)) 5965 goto next; 5966 break; 5967 next: 5968 spin_unlock(&head->lock); 5969 } while (--remaining > 0); 5970 5971 /* Exhausted local port range during search? */ 5972 ret = 1; 5973 if (remaining <= 0) 5974 goto fail; 5975 5976 /* OK, here is the one we will use. HEAD (the port 5977 * hash table list entry) is non-NULL and we hold it's 5978 * mutex. 5979 */ 5980 snum = rover; 5981 } else { 5982 /* We are given an specific port number; we verify 5983 * that it is not being used. If it is used, we will 5984 * exahust the search in the hash list corresponding 5985 * to the port number (snum) - we detect that with the 5986 * port iterator, pp being NULL. 5987 */ 5988 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 5989 spin_lock(&head->lock); 5990 sctp_for_each_hentry(pp, &head->chain) { 5991 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 5992 goto pp_found; 5993 } 5994 } 5995 pp = NULL; 5996 goto pp_not_found; 5997 pp_found: 5998 if (!hlist_empty(&pp->owner)) { 5999 /* We had a port hash table hit - there is an 6000 * available port (pp != NULL) and it is being 6001 * used by other socket (pp->owner not empty); that other 6002 * socket is going to be sk2. 6003 */ 6004 int reuse = sk->sk_reuse; 6005 struct sock *sk2; 6006 6007 pr_debug("%s: found a possible match\n", __func__); 6008 6009 if (pp->fastreuse && sk->sk_reuse && 6010 sk->sk_state != SCTP_SS_LISTENING) 6011 goto success; 6012 6013 /* Run through the list of sockets bound to the port 6014 * (pp->port) [via the pointers bind_next and 6015 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 6016 * we get the endpoint they describe and run through 6017 * the endpoint's list of IP (v4 or v6) addresses, 6018 * comparing each of the addresses with the address of 6019 * the socket sk. If we find a match, then that means 6020 * that this port/socket (sk) combination are already 6021 * in an endpoint. 6022 */ 6023 sk_for_each_bound(sk2, &pp->owner) { 6024 struct sctp_endpoint *ep2; 6025 ep2 = sctp_sk(sk2)->ep; 6026 6027 if (sk == sk2 || 6028 (reuse && sk2->sk_reuse && 6029 sk2->sk_state != SCTP_SS_LISTENING)) 6030 continue; 6031 6032 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 6033 sctp_sk(sk2), sctp_sk(sk))) { 6034 ret = (long)sk2; 6035 goto fail_unlock; 6036 } 6037 } 6038 6039 pr_debug("%s: found a match\n", __func__); 6040 } 6041 pp_not_found: 6042 /* If there was a hash table miss, create a new port. */ 6043 ret = 1; 6044 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 6045 goto fail_unlock; 6046 6047 /* In either case (hit or miss), make sure fastreuse is 1 only 6048 * if sk->sk_reuse is too (that is, if the caller requested 6049 * SO_REUSEADDR on this socket -sk-). 
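 *
 * In short, matching the code below:
 *   - empty bucket:     fastreuse becomes 1 only if this socket has
 *                       SO_REUSEADDR set and is not listening;
 *   - occupied bucket:  fastreuse is cleared as soon as any new owner
 *                       lacks SO_REUSEADDR or is already listening.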
6050 */ 6051 if (hlist_empty(&pp->owner)) { 6052 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 6053 pp->fastreuse = 1; 6054 else 6055 pp->fastreuse = 0; 6056 } else if (pp->fastreuse && 6057 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 6058 pp->fastreuse = 0; 6059 6060 /* We are set, so fill up all the data in the hash table 6061 * entry, tie the socket list information with the rest of the 6062 * sockets FIXME: Blurry, NPI (ipg). 6063 */ 6064 success: 6065 if (!sctp_sk(sk)->bind_hash) { 6066 inet_sk(sk)->inet_num = snum; 6067 sk_add_bind_node(sk, &pp->owner); 6068 sctp_sk(sk)->bind_hash = pp; 6069 } 6070 ret = 0; 6071 6072 fail_unlock: 6073 spin_unlock(&head->lock); 6074 6075 fail: 6076 local_bh_enable(); 6077 return ret; 6078 } 6079 6080 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 6081 * port is requested. 6082 */ 6083 static int sctp_get_port(struct sock *sk, unsigned short snum) 6084 { 6085 union sctp_addr addr; 6086 struct sctp_af *af = sctp_sk(sk)->pf->af; 6087 6088 /* Set up a dummy address struct from the sk. */ 6089 af->from_sk(&addr, sk); 6090 addr.v4.sin_port = htons(snum); 6091 6092 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 6093 return !!sctp_get_port_local(sk, &addr); 6094 } 6095 6096 /* 6097 * Move a socket to LISTENING state. 6098 */ 6099 static int sctp_listen_start(struct sock *sk, int backlog) 6100 { 6101 struct sctp_sock *sp = sctp_sk(sk); 6102 struct sctp_endpoint *ep = sp->ep; 6103 struct crypto_hash *tfm = NULL; 6104 char alg[32]; 6105 6106 /* Allocate HMAC for generating cookie. */ 6107 if (!sp->hmac && sp->sctp_hmac_alg) { 6108 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 6109 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 6110 if (IS_ERR(tfm)) { 6111 net_info_ratelimited("failed to load transform for %s: %ld\n", 6112 sp->sctp_hmac_alg, PTR_ERR(tfm)); 6113 return -ENOSYS; 6114 } 6115 sctp_sk(sk)->hmac = tfm; 6116 } 6117 6118 /* 6119 * If a bind() or sctp_bindx() is not called prior to a listen() 6120 * call that allows new associations to be accepted, the system 6121 * picks an ephemeral port and will choose an address set equivalent 6122 * to binding with a wildcard address. 6123 * 6124 * This is not currently spelled out in the SCTP sockets 6125 * extensions draft, but follows the practice as seen in TCP 6126 * sockets. 6127 * 6128 */ 6129 sk->sk_state = SCTP_SS_LISTENING; 6130 if (!ep->base.bind_addr.port) { 6131 if (sctp_autobind(sk)) 6132 return -EAGAIN; 6133 } else { 6134 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 6135 sk->sk_state = SCTP_SS_CLOSED; 6136 return -EADDRINUSE; 6137 } 6138 } 6139 6140 sk->sk_max_ack_backlog = backlog; 6141 sctp_hash_endpoint(ep); 6142 return 0; 6143 } 6144 6145 /* 6146 * 4.1.3 / 5.1.3 listen() 6147 * 6148 * By default, new associations are not accepted for UDP style sockets. 6149 * An application uses listen() to mark a socket as being able to 6150 * accept new associations. 6151 * 6152 * On TCP style sockets, applications use listen() to ready the SCTP 6153 * endpoint for accepting inbound associations. 6154 * 6155 * On both types of endpoints a backlog of '0' disables listening. 6156 * 6157 * Move a socket to LISTENING state. 6158 */ 6159 int sctp_inet_listen(struct socket *sock, int backlog) 6160 { 6161 struct sock *sk = sock->sk; 6162 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6163 int err = -EINVAL; 6164 6165 if (unlikely(backlog < 0)) 6166 return err; 6167 6168 lock_sock(sk); 6169 6170 /* Peeled-off sockets are not allowed to listen(). 
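 * A socket returned by sctp_peeloff() wraps exactly one existing
 * association and can never accept new ones, so a user-space
 * listen() on it is rejected.  Illustrative sketch (not part of
 * this file; sctp_peeloff() is the lksctp-tools helper):
 *
 *	int peeled = sctp_peeloff(fd, assoc_id);
 *
 *	listen(peeled, 5);	fails, errno == EINVAL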
*/ 6171 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 6172 goto out; 6173 6174 if (sock->state != SS_UNCONNECTED) 6175 goto out; 6176 6177 /* If backlog is zero, disable listening. */ 6178 if (!backlog) { 6179 if (sctp_sstate(sk, CLOSED)) 6180 goto out; 6181 6182 err = 0; 6183 sctp_unhash_endpoint(ep); 6184 sk->sk_state = SCTP_SS_CLOSED; 6185 if (sk->sk_reuse) 6186 sctp_sk(sk)->bind_hash->fastreuse = 1; 6187 goto out; 6188 } 6189 6190 /* If we are already listening, just update the backlog */ 6191 if (sctp_sstate(sk, LISTENING)) 6192 sk->sk_max_ack_backlog = backlog; 6193 else { 6194 err = sctp_listen_start(sk, backlog); 6195 if (err) 6196 goto out; 6197 } 6198 6199 err = 0; 6200 out: 6201 release_sock(sk); 6202 return err; 6203 } 6204 6205 /* 6206 * This function is done by modeling the current datagram_poll() and the 6207 * tcp_poll(). Note that, based on these implementations, we don't 6208 * lock the socket in this function, even though it seems that, 6209 * ideally, locking or some other mechanisms can be used to ensure 6210 * the integrity of the counters (sndbuf and wmem_alloc) used 6211 * in this place. We assume that we don't need locks either until proven 6212 * otherwise. 6213 * 6214 * Another thing to note is that we include the Async I/O support 6215 * here, again, by modeling the current TCP/UDP code. We don't have 6216 * a good way to test with it yet. 6217 */ 6218 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 6219 { 6220 struct sock *sk = sock->sk; 6221 struct sctp_sock *sp = sctp_sk(sk); 6222 unsigned int mask; 6223 6224 poll_wait(file, sk_sleep(sk), wait); 6225 6226 /* A TCP-style listening socket becomes readable when the accept queue 6227 * is not empty. 6228 */ 6229 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 6230 return (!list_empty(&sp->ep->asocs)) ? 6231 (POLLIN | POLLRDNORM) : 0; 6232 6233 mask = 0; 6234 6235 /* Is there any exceptional events? */ 6236 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6237 mask |= POLLERR | 6238 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 6239 if (sk->sk_shutdown & RCV_SHUTDOWN) 6240 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6241 if (sk->sk_shutdown == SHUTDOWN_MASK) 6242 mask |= POLLHUP; 6243 6244 /* Is it readable? Reconsider this code with TCP-style support. */ 6245 if (!skb_queue_empty(&sk->sk_receive_queue)) 6246 mask |= POLLIN | POLLRDNORM; 6247 6248 /* The association is either gone or not ready. */ 6249 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 6250 return mask; 6251 6252 /* Is it writable? */ 6253 if (sctp_writeable(sk)) { 6254 mask |= POLLOUT | POLLWRNORM; 6255 } else { 6256 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6257 /* 6258 * Since the socket is not locked, the buffer 6259 * might be made available after the writeable check and 6260 * before the bit is set. This could cause a lost I/O 6261 * signal. tcp_poll() has a race breaker for this race 6262 * condition. Based on their implementation, we put 6263 * in the following code to cover it as well. 
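 * In other words: SOCK_ASYNC_NOSPACE is set first and writeability
 * is then re-tested, so space that was freed between the first
 * sctp_writeable() check and the bit being set is still reported.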
6264 */ 6265 if (sctp_writeable(sk)) 6266 mask |= POLLOUT | POLLWRNORM; 6267 } 6268 return mask; 6269 } 6270 6271 /******************************************************************** 6272 * 2nd Level Abstractions 6273 ********************************************************************/ 6274 6275 static struct sctp_bind_bucket *sctp_bucket_create( 6276 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 6277 { 6278 struct sctp_bind_bucket *pp; 6279 6280 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 6281 if (pp) { 6282 SCTP_DBG_OBJCNT_INC(bind_bucket); 6283 pp->port = snum; 6284 pp->fastreuse = 0; 6285 INIT_HLIST_HEAD(&pp->owner); 6286 pp->net = net; 6287 hlist_add_head(&pp->node, &head->chain); 6288 } 6289 return pp; 6290 } 6291 6292 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 6293 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 6294 { 6295 if (pp && hlist_empty(&pp->owner)) { 6296 __hlist_del(&pp->node); 6297 kmem_cache_free(sctp_bucket_cachep, pp); 6298 SCTP_DBG_OBJCNT_DEC(bind_bucket); 6299 } 6300 } 6301 6302 /* Release this socket's reference to a local port. */ 6303 static inline void __sctp_put_port(struct sock *sk) 6304 { 6305 struct sctp_bind_hashbucket *head = 6306 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 6307 inet_sk(sk)->inet_num)]; 6308 struct sctp_bind_bucket *pp; 6309 6310 spin_lock(&head->lock); 6311 pp = sctp_sk(sk)->bind_hash; 6312 __sk_del_bind_node(sk); 6313 sctp_sk(sk)->bind_hash = NULL; 6314 inet_sk(sk)->inet_num = 0; 6315 sctp_bucket_destroy(pp); 6316 spin_unlock(&head->lock); 6317 } 6318 6319 void sctp_put_port(struct sock *sk) 6320 { 6321 local_bh_disable(); 6322 __sctp_put_port(sk); 6323 local_bh_enable(); 6324 } 6325 6326 /* 6327 * The system picks an ephemeral port and choose an address set equivalent 6328 * to binding with a wildcard address. 6329 * One of those addresses will be the primary address for the association. 6330 * This automatically enables the multihoming capability of SCTP. 6331 */ 6332 static int sctp_autobind(struct sock *sk) 6333 { 6334 union sctp_addr autoaddr; 6335 struct sctp_af *af; 6336 __be16 port; 6337 6338 /* Initialize a local sockaddr structure to INADDR_ANY. */ 6339 af = sctp_sk(sk)->pf->af; 6340 6341 port = htons(inet_sk(sk)->inet_num); 6342 af->inaddr_any(&autoaddr, port); 6343 6344 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 6345 } 6346 6347 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 6348 * 6349 * From RFC 2292 6350 * 4.2 The cmsghdr Structure * 6351 * 6352 * When ancillary data is sent or received, any number of ancillary data 6353 * objects can be specified by the msg_control and msg_controllen members of 6354 * the msghdr structure, because each object is preceded by 6355 * a cmsghdr structure defining the object's length (the cmsg_len member). 6356 * Historically Berkeley-derived implementations have passed only one object 6357 * at a time, but this API allows multiple objects to be 6358 * passed in a single call to sendmsg() or recvmsg(). The following example 6359 * shows two ancillary data objects in a control buffer. 
6360 * 6361 * |<--------------------------- msg_controllen -------------------------->| 6362 * | | 6363 * 6364 * |<----- ancillary data object ----->|<----- ancillary data object ----->| 6365 * 6366 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| 6367 * | | | 6368 * 6369 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | 6370 * 6371 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | 6372 * | | | | | 6373 * 6374 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6375 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| 6376 * 6377 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| 6378 * 6379 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ 6380 * ^ 6381 * | 6382 * 6383 * msg_control 6384 * points here 6385 */ 6386 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) 6387 { 6388 struct cmsghdr *cmsg; 6389 struct msghdr *my_msg = (struct msghdr *)msg; 6390 6391 for (cmsg = CMSG_FIRSTHDR(msg); 6392 cmsg != NULL; 6393 cmsg = CMSG_NXTHDR(my_msg, cmsg)) { 6394 if (!CMSG_OK(my_msg, cmsg)) 6395 return -EINVAL; 6396 6397 /* Should we parse this header or ignore? */ 6398 if (cmsg->cmsg_level != IPPROTO_SCTP) 6399 continue; 6400 6401 /* Strictly check lengths following example in SCM code. */ 6402 switch (cmsg->cmsg_type) { 6403 case SCTP_INIT: 6404 /* SCTP Socket API Extension 6405 * 5.2.1 SCTP Initiation Structure (SCTP_INIT) 6406 * 6407 * This cmsghdr structure provides information for 6408 * initializing new SCTP associations with sendmsg(). 6409 * The SCTP_INITMSG socket option uses this same data 6410 * structure. This structure is not used for 6411 * recvmsg(). 6412 * 6413 * cmsg_level cmsg_type cmsg_data[] 6414 * ------------ ------------ ---------------------- 6415 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg 6416 */ 6417 if (cmsg->cmsg_len != 6418 CMSG_LEN(sizeof(struct sctp_initmsg))) 6419 return -EINVAL; 6420 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); 6421 break; 6422 6423 case SCTP_SNDRCV: 6424 /* SCTP Socket API Extension 6425 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) 6426 * 6427 * This cmsghdr structure specifies SCTP options for 6428 * sendmsg() and describes SCTP header information 6429 * about a received message through recvmsg(). 6430 * 6431 * cmsg_level cmsg_type cmsg_data[] 6432 * ------------ ------------ ---------------------- 6433 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo 6434 */ 6435 if (cmsg->cmsg_len != 6436 CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) 6437 return -EINVAL; 6438 6439 cmsgs->info = 6440 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 6441 6442 /* Minimally, validate the sinfo_flags. */ 6443 if (cmsgs->info->sinfo_flags & 6444 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 6445 SCTP_ABORT | SCTP_EOF)) 6446 return -EINVAL; 6447 break; 6448 6449 default: 6450 return -EINVAL; 6451 } 6452 } 6453 return 0; 6454 } 6455 6456 /* 6457 * Wait for a packet.. 6458 * Note: This function is the same function as in core/datagram.c 6459 * with a few modifications to make lksctp work. 6460 */ 6461 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) 6462 { 6463 int error; 6464 DEFINE_WAIT(wait); 6465 6466 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6467 6468 /* Socket errors? */ 6469 error = sock_error(sk); 6470 if (error) 6471 goto out; 6472 6473 if (!skb_queue_empty(&sk->sk_receive_queue)) 6474 goto ready; 6475 6476 /* Socket shut down? 
*/ 6477 if (sk->sk_shutdown & RCV_SHUTDOWN) 6478 goto out; 6479 6480 /* Sequenced packets can come disconnected. If so we report the 6481 * problem. 6482 */ 6483 error = -ENOTCONN; 6484 6485 /* Is there a good reason to think that we may receive some data? */ 6486 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) 6487 goto out; 6488 6489 /* Handle signals. */ 6490 if (signal_pending(current)) 6491 goto interrupted; 6492 6493 /* Let another process have a go. Since we are going to sleep 6494 * anyway. Note: This may cause odd behaviors if the message 6495 * does not fit in the user's buffer, but this seems to be the 6496 * only way to honor MSG_DONTWAIT realistically. 6497 */ 6498 release_sock(sk); 6499 *timeo_p = schedule_timeout(*timeo_p); 6500 lock_sock(sk); 6501 6502 ready: 6503 finish_wait(sk_sleep(sk), &wait); 6504 return 0; 6505 6506 interrupted: 6507 error = sock_intr_errno(*timeo_p); 6508 6509 out: 6510 finish_wait(sk_sleep(sk), &wait); 6511 *err = error; 6512 return error; 6513 } 6514 6515 /* Receive a datagram. 6516 * Note: This is pretty much the same routine as in core/datagram.c 6517 * with a few changes to make lksctp work. 6518 */ 6519 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, 6520 int noblock, int *err) 6521 { 6522 int error; 6523 struct sk_buff *skb; 6524 long timeo; 6525 6526 timeo = sock_rcvtimeo(sk, noblock); 6527 6528 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, 6529 MAX_SCHEDULE_TIMEOUT); 6530 6531 do { 6532 /* Again only user level code calls this function, 6533 * so nothing interrupt level 6534 * will suddenly eat the receive_queue. 6535 * 6536 * Look at current nfs client by the way... 6537 * However, this function was correct in any case. 8) 6538 */ 6539 if (flags & MSG_PEEK) { 6540 spin_lock_bh(&sk->sk_receive_queue.lock); 6541 skb = skb_peek(&sk->sk_receive_queue); 6542 if (skb) 6543 atomic_inc(&skb->users); 6544 spin_unlock_bh(&sk->sk_receive_queue.lock); 6545 } else { 6546 skb = skb_dequeue(&sk->sk_receive_queue); 6547 } 6548 6549 if (skb) 6550 return skb; 6551 6552 /* Caller is allowed not to check sk->sk_err before calling. */ 6553 error = sock_error(sk); 6554 if (error) 6555 goto no_packet; 6556 6557 if (sk->sk_shutdown & RCV_SHUTDOWN) 6558 break; 6559 6560 /* User doesn't want to wait. */ 6561 error = -EAGAIN; 6562 if (!timeo) 6563 goto no_packet; 6564 } while (sctp_wait_for_packet(sk, err, &timeo) == 0); 6565 6566 return NULL; 6567 6568 no_packet: 6569 *err = error; 6570 return NULL; 6571 } 6572 6573 /* If sndbuf has changed, wake up per association sndbuf waiters. */ 6574 static void __sctp_write_space(struct sctp_association *asoc) 6575 { 6576 struct sock *sk = asoc->base.sk; 6577 struct socket *sock = sk->sk_socket; 6578 6579 if ((sctp_wspace(asoc) > 0) && sock) { 6580 if (waitqueue_active(&asoc->wait)) 6581 wake_up_interruptible(&asoc->wait); 6582 6583 if (sctp_writeable(sk)) { 6584 wait_queue_head_t *wq = sk_sleep(sk); 6585 6586 if (wq && waitqueue_active(wq)) 6587 wake_up_interruptible(wq); 6588 6589 /* Note that we try to include the Async I/O support 6590 * here by modeling from the current TCP/UDP code. 6591 * We have not tested with it yet. 
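 * The net effect of the call below is that a process which enabled
 * O_ASYNC on the socket gets a SIGIO/POLL_OUT notification when send
 * space opens up again, unless the socket is already shut down for
 * sending.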
6592 */ 6593 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6594 sock_wake_async(sock, 6595 SOCK_WAKE_SPACE, POLL_OUT); 6596 } 6597 } 6598 } 6599 6600 static void sctp_wake_up_waiters(struct sock *sk, 6601 struct sctp_association *asoc) 6602 { 6603 struct sctp_association *tmp = asoc; 6604 6605 /* We do accounting for the sndbuf space per association, 6606 * so we only need to wake our own association. 6607 */ 6608 if (asoc->ep->sndbuf_policy) 6609 return __sctp_write_space(asoc); 6610 6611 /* If association goes down and is just flushing its 6612 * outq, then just normally notify others. 6613 */ 6614 if (asoc->base.dead) 6615 return sctp_write_space(sk); 6616 6617 /* Accounting for the sndbuf space is per socket, so we 6618 * need to wake up others, try to be fair and in case of 6619 * other associations, let them have a go first instead 6620 * of just doing a sctp_write_space() call. 6621 * 6622 * Note that we reach sctp_wake_up_waiters() only when 6623 * associations free up queued chunks, thus we are under 6624 * lock and the list of associations on a socket is 6625 * guaranteed not to change. 6626 */ 6627 for (tmp = list_next_entry(tmp, asocs); 1; 6628 tmp = list_next_entry(tmp, asocs)) { 6629 /* Manually skip the head element. */ 6630 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) 6631 continue; 6632 /* Wake up association. */ 6633 __sctp_write_space(tmp); 6634 /* We've reached the end. */ 6635 if (tmp == asoc) 6636 break; 6637 } 6638 } 6639 6640 /* Do accounting for the sndbuf space. 6641 * Decrement the used sndbuf space of the corresponding association by the 6642 * data size which was just transmitted(freed). 6643 */ 6644 static void sctp_wfree(struct sk_buff *skb) 6645 { 6646 struct sctp_association *asoc; 6647 struct sctp_chunk *chunk; 6648 struct sock *sk; 6649 6650 /* Get the saved chunk pointer. */ 6651 chunk = *((struct sctp_chunk **)(skb->cb)); 6652 asoc = chunk->asoc; 6653 sk = asoc->base.sk; 6654 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + 6655 sizeof(struct sk_buff) + 6656 sizeof(struct sctp_chunk); 6657 6658 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); 6659 6660 /* 6661 * This undoes what is done via sctp_set_owner_w and sk_mem_charge 6662 */ 6663 sk->sk_wmem_queued -= skb->truesize; 6664 sk_mem_uncharge(sk, skb->truesize); 6665 6666 sock_wfree(skb); 6667 sctp_wake_up_waiters(sk, asoc); 6668 6669 sctp_association_put(asoc); 6670 } 6671 6672 /* Do accounting for the receive space on the socket. 6673 * Accounting for the association is done in ulpevent.c 6674 * We set this as a destructor for the cloned data skbs so that 6675 * accounting is done at the correct time. 6676 */ 6677 void sctp_sock_rfree(struct sk_buff *skb) 6678 { 6679 struct sock *sk = skb->sk; 6680 struct sctp_ulpevent *event = sctp_skb2event(skb); 6681 6682 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 6683 6684 /* 6685 * Mimic the behavior of sock_rfree 6686 */ 6687 sk_mem_uncharge(sk, event->rmem_len); 6688 } 6689 6690 6691 /* Helper function to wait for space in the sndbuf. */ 6692 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 6693 size_t msg_len) 6694 { 6695 struct sock *sk = asoc->base.sk; 6696 int err = 0; 6697 long current_timeo = *timeo_p; 6698 DEFINE_WAIT(wait); 6699 6700 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, 6701 *timeo_p, msg_len); 6702 6703 /* Increment the association's refcnt. */ 6704 sctp_association_hold(asoc); 6705 6706 /* Wait on the association specific sndbuf space. 
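 * The loop below sleeps on asoc->wait until enough room for msg_len
 * bytes is available, and bails out early on a zero timeout
 * (-EAGAIN), a pending signal, or an error/shutdown/death of the
 * association (-EPIPE).  The socket lock is dropped around
 * schedule_timeout() so that sctp_wfree() can run and release space.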
*/ 6707 for (;;) { 6708 prepare_to_wait_exclusive(&asoc->wait, &wait, 6709 TASK_INTERRUPTIBLE); 6710 if (!*timeo_p) 6711 goto do_nonblock; 6712 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6713 asoc->base.dead) 6714 goto do_error; 6715 if (signal_pending(current)) 6716 goto do_interrupted; 6717 if (msg_len <= sctp_wspace(asoc)) 6718 break; 6719 6720 /* Let another process have a go. Since we are going 6721 * to sleep anyway. 6722 */ 6723 release_sock(sk); 6724 current_timeo = schedule_timeout(current_timeo); 6725 BUG_ON(sk != asoc->base.sk); 6726 lock_sock(sk); 6727 6728 *timeo_p = current_timeo; 6729 } 6730 6731 out: 6732 finish_wait(&asoc->wait, &wait); 6733 6734 /* Release the association's refcnt. */ 6735 sctp_association_put(asoc); 6736 6737 return err; 6738 6739 do_error: 6740 err = -EPIPE; 6741 goto out; 6742 6743 do_interrupted: 6744 err = sock_intr_errno(*timeo_p); 6745 goto out; 6746 6747 do_nonblock: 6748 err = -EAGAIN; 6749 goto out; 6750 } 6751 6752 void sctp_data_ready(struct sock *sk) 6753 { 6754 struct socket_wq *wq; 6755 6756 rcu_read_lock(); 6757 wq = rcu_dereference(sk->sk_wq); 6758 if (wq_has_sleeper(wq)) 6759 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 6760 POLLRDNORM | POLLRDBAND); 6761 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6762 rcu_read_unlock(); 6763 } 6764 6765 /* If socket sndbuf has changed, wake up all per association waiters. */ 6766 void sctp_write_space(struct sock *sk) 6767 { 6768 struct sctp_association *asoc; 6769 6770 /* Wake up the tasks in each wait queue. */ 6771 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { 6772 __sctp_write_space(asoc); 6773 } 6774 } 6775 6776 /* Is there any sndbuf space available on the socket? 6777 * 6778 * Note that sk_wmem_alloc is the sum of the send buffers on all of the 6779 * associations on the same socket. For a UDP-style socket with 6780 * multiple associations, it is possible for it to be "unwriteable" 6781 * prematurely. I assume that this is acceptable because 6782 * a premature "unwriteable" is better than an accidental "writeable" which 6783 * would cause an unwanted block under certain circumstances. For the 1-1 6784 * UDP-style sockets or TCP-style sockets, this code should work. 6785 * - Daisy 6786 */ 6787 static int sctp_writeable(struct sock *sk) 6788 { 6789 int amt = 0; 6790 6791 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 6792 if (amt < 0) 6793 amt = 0; 6794 return amt; 6795 } 6796 6797 /* Wait for an association to go into ESTABLISHED state. If timeout is 0, 6798 * returns immediately with EINPROGRESS. 6799 */ 6800 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) 6801 { 6802 struct sock *sk = asoc->base.sk; 6803 int err = 0; 6804 long current_timeo = *timeo_p; 6805 DEFINE_WAIT(wait); 6806 6807 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); 6808 6809 /* Increment the association's refcnt. */ 6810 sctp_association_hold(asoc); 6811 6812 for (;;) { 6813 prepare_to_wait_exclusive(&asoc->wait, &wait, 6814 TASK_INTERRUPTIBLE); 6815 if (!*timeo_p) 6816 goto do_nonblock; 6817 if (sk->sk_shutdown & RCV_SHUTDOWN) 6818 break; 6819 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || 6820 asoc->base.dead) 6821 goto do_error; 6822 if (signal_pending(current)) 6823 goto do_interrupted; 6824 6825 if (sctp_state(asoc, ESTABLISHED)) 6826 break; 6827 6828 /* Let another process have a go. Since we are going 6829 * to sleep anyway. 
6830 */ 6831 release_sock(sk); 6832 current_timeo = schedule_timeout(current_timeo); 6833 lock_sock(sk); 6834 6835 *timeo_p = current_timeo; 6836 } 6837 6838 out: 6839 finish_wait(&asoc->wait, &wait); 6840 6841 /* Release the association's refcnt. */ 6842 sctp_association_put(asoc); 6843 6844 return err; 6845 6846 do_error: 6847 if (asoc->init_err_counter + 1 > asoc->max_init_attempts) 6848 err = -ETIMEDOUT; 6849 else 6850 err = -ECONNREFUSED; 6851 goto out; 6852 6853 do_interrupted: 6854 err = sock_intr_errno(*timeo_p); 6855 goto out; 6856 6857 do_nonblock: 6858 err = -EINPROGRESS; 6859 goto out; 6860 } 6861 6862 static int sctp_wait_for_accept(struct sock *sk, long timeo) 6863 { 6864 struct sctp_endpoint *ep; 6865 int err = 0; 6866 DEFINE_WAIT(wait); 6867 6868 ep = sctp_sk(sk)->ep; 6869 6870 6871 for (;;) { 6872 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 6873 TASK_INTERRUPTIBLE); 6874 6875 if (list_empty(&ep->asocs)) { 6876 release_sock(sk); 6877 timeo = schedule_timeout(timeo); 6878 lock_sock(sk); 6879 } 6880 6881 err = -EINVAL; 6882 if (!sctp_sstate(sk, LISTENING)) 6883 break; 6884 6885 err = 0; 6886 if (!list_empty(&ep->asocs)) 6887 break; 6888 6889 err = sock_intr_errno(timeo); 6890 if (signal_pending(current)) 6891 break; 6892 6893 err = -EAGAIN; 6894 if (!timeo) 6895 break; 6896 } 6897 6898 finish_wait(sk_sleep(sk), &wait); 6899 6900 return err; 6901 } 6902 6903 static void sctp_wait_for_close(struct sock *sk, long timeout) 6904 { 6905 DEFINE_WAIT(wait); 6906 6907 do { 6908 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 6909 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6910 break; 6911 release_sock(sk); 6912 timeout = schedule_timeout(timeout); 6913 lock_sock(sk); 6914 } while (!signal_pending(current) && timeout); 6915 6916 finish_wait(sk_sleep(sk), &wait); 6917 } 6918 6919 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6920 { 6921 struct sk_buff *frag; 6922 6923 if (!skb->data_len) 6924 goto done; 6925 6926 /* Don't forget the fragments. 
*/ 6927 skb_walk_frags(skb, frag) 6928 sctp_skb_set_owner_r_frag(frag, sk); 6929 6930 done: 6931 sctp_skb_set_owner_r(skb, sk); 6932 } 6933 6934 void sctp_copy_sock(struct sock *newsk, struct sock *sk, 6935 struct sctp_association *asoc) 6936 { 6937 struct inet_sock *inet = inet_sk(sk); 6938 struct inet_sock *newinet; 6939 6940 newsk->sk_type = sk->sk_type; 6941 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6942 newsk->sk_flags = sk->sk_flags; 6943 newsk->sk_no_check = sk->sk_no_check; 6944 newsk->sk_reuse = sk->sk_reuse; 6945 6946 newsk->sk_shutdown = sk->sk_shutdown; 6947 newsk->sk_destruct = sctp_destruct_sock; 6948 newsk->sk_family = sk->sk_family; 6949 newsk->sk_protocol = IPPROTO_SCTP; 6950 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; 6951 newsk->sk_sndbuf = sk->sk_sndbuf; 6952 newsk->sk_rcvbuf = sk->sk_rcvbuf; 6953 newsk->sk_lingertime = sk->sk_lingertime; 6954 newsk->sk_rcvtimeo = sk->sk_rcvtimeo; 6955 newsk->sk_sndtimeo = sk->sk_sndtimeo; 6956 6957 newinet = inet_sk(newsk); 6958 6959 /* Initialize sk's sport, dport, rcv_saddr and daddr for 6960 * getsockname() and getpeername() 6961 */ 6962 newinet->inet_sport = inet->inet_sport; 6963 newinet->inet_saddr = inet->inet_saddr; 6964 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 6965 newinet->inet_dport = htons(asoc->peer.port); 6966 newinet->pmtudisc = inet->pmtudisc; 6967 newinet->inet_id = asoc->next_tsn ^ jiffies; 6968 6969 newinet->uc_ttl = inet->uc_ttl; 6970 newinet->mc_loop = 1; 6971 newinet->mc_ttl = 1; 6972 newinet->mc_index = 0; 6973 newinet->mc_list = NULL; 6974 } 6975 6976 /* Populate the fields of the newsk from the oldsk and migrate the assoc 6977 * and its messages to the newsk. 6978 */ 6979 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, 6980 struct sctp_association *assoc, 6981 sctp_socket_type_t type) 6982 { 6983 struct sctp_sock *oldsp = sctp_sk(oldsk); 6984 struct sctp_sock *newsp = sctp_sk(newsk); 6985 struct sctp_bind_bucket *pp; /* hash list port iterator */ 6986 struct sctp_endpoint *newep = newsp->ep; 6987 struct sk_buff *skb, *tmp; 6988 struct sctp_ulpevent *event; 6989 struct sctp_bind_hashbucket *head; 6990 struct list_head tmplist; 6991 6992 /* Migrate socket buffer sizes and all the socket level options to the 6993 * new socket. 6994 */ 6995 newsk->sk_sndbuf = oldsk->sk_sndbuf; 6996 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 6997 /* Brute force copy old sctp opt. */ 6998 if (oldsp->do_auto_asconf) { 6999 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); 7000 inet_sk_copy_descendant(newsk, oldsk); 7001 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); 7002 } else 7003 inet_sk_copy_descendant(newsk, oldsk); 7004 7005 /* Restore the ep value that was overwritten with the above structure 7006 * copy. 7007 */ 7008 newsp->ep = newep; 7009 newsp->hmac = NULL; 7010 7011 /* Hook this new socket in to the bind_hash list. 
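 * The new socket inherits the old socket's local port: it is added
 * to the same sctp_bind_bucket under the bucket spinlock (with
 * bottom halves disabled), and its inet_num/bind_hash fields are
 * made to mirror those of the old socket.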
*/ 7012 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), 7013 inet_sk(oldsk)->inet_num)]; 7014 local_bh_disable(); 7015 spin_lock(&head->lock); 7016 pp = sctp_sk(oldsk)->bind_hash; 7017 sk_add_bind_node(newsk, &pp->owner); 7018 sctp_sk(newsk)->bind_hash = pp; 7019 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; 7020 spin_unlock(&head->lock); 7021 local_bh_enable(); 7022 7023 /* Copy the bind_addr list from the original endpoint to the new 7024 * endpoint so that we can handle restarts properly 7025 */ 7026 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, 7027 &oldsp->ep->base.bind_addr, GFP_KERNEL); 7028 7029 /* Move any messages in the old socket's receive queue that are for the 7030 * peeled off association to the new socket's receive queue. 7031 */ 7032 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 7033 event = sctp_skb2event(skb); 7034 if (event->asoc == assoc) { 7035 __skb_unlink(skb, &oldsk->sk_receive_queue); 7036 __skb_queue_tail(&newsk->sk_receive_queue, skb); 7037 sctp_skb_set_owner_r_frag(skb, newsk); 7038 } 7039 } 7040 7041 /* Clean up any messages pending delivery due to partial 7042 * delivery. Three cases: 7043 * 1) No partial deliver; no work. 7044 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 7045 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 7046 */ 7047 skb_queue_head_init(&newsp->pd_lobby); 7048 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 7049 7050 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 7051 struct sk_buff_head *queue; 7052 7053 /* Decide which queue to move pd_lobby skbs to. */ 7054 if (assoc->ulpq.pd_mode) { 7055 queue = &newsp->pd_lobby; 7056 } else 7057 queue = &newsk->sk_receive_queue; 7058 7059 /* Walk through the pd_lobby, looking for skbs that 7060 * need moved to the new socket. 7061 */ 7062 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 7063 event = sctp_skb2event(skb); 7064 if (event->asoc == assoc) { 7065 __skb_unlink(skb, &oldsp->pd_lobby); 7066 __skb_queue_tail(queue, skb); 7067 sctp_skb_set_owner_r_frag(skb, newsk); 7068 } 7069 } 7070 7071 /* Clear up any skbs waiting for the partial 7072 * delivery to finish. 7073 */ 7074 if (assoc->ulpq.pd_mode) 7075 sctp_clear_pd(oldsk, NULL); 7076 7077 } 7078 7079 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) 7080 sctp_skb_set_owner_r_frag(skb, newsk); 7081 7082 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) 7083 sctp_skb_set_owner_r_frag(skb, newsk); 7084 7085 /* Set the type of socket to indicate that it is peeled off from the 7086 * original UDP-style socket or created with the accept() call on a 7087 * TCP-style socket.. 7088 */ 7089 newsp->type = type; 7090 7091 /* Mark the new socket "in-use" by the user so that any packets 7092 * that may arrive on the association after we've moved it are 7093 * queued to the backlog. This prevents a potential race between 7094 * backlog processing on the old socket and new-packet processing 7095 * on the new socket. 7096 * 7097 * The caller has just allocated newsk so we can guarantee that other 7098 * paths won't try to lock it and then oldsk. 7099 */ 7100 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 7101 sctp_assoc_migrate(assoc, newsk); 7102 7103 /* If the association on the newsk is already closed before accept() 7104 * is called, set RCV_SHUTDOWN flag. 
7105 */ 7106 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) 7107 newsk->sk_shutdown |= RCV_SHUTDOWN; 7108 7109 newsk->sk_state = SCTP_SS_ESTABLISHED; 7110 release_sock(newsk); 7111 } 7112 7113 7114 /* This proto struct describes the ULP interface for SCTP. */ 7115 struct proto sctp_prot = { 7116 .name = "SCTP", 7117 .owner = THIS_MODULE, 7118 .close = sctp_close, 7119 .connect = sctp_connect, 7120 .disconnect = sctp_disconnect, 7121 .accept = sctp_accept, 7122 .ioctl = sctp_ioctl, 7123 .init = sctp_init_sock, 7124 .destroy = sctp_destroy_sock, 7125 .shutdown = sctp_shutdown, 7126 .setsockopt = sctp_setsockopt, 7127 .getsockopt = sctp_getsockopt, 7128 .sendmsg = sctp_sendmsg, 7129 .recvmsg = sctp_recvmsg, 7130 .bind = sctp_bind, 7131 .backlog_rcv = sctp_backlog_rcv, 7132 .hash = sctp_hash, 7133 .unhash = sctp_unhash, 7134 .get_port = sctp_get_port, 7135 .obj_size = sizeof(struct sctp_sock), 7136 .sysctl_mem = sysctl_sctp_mem, 7137 .sysctl_rmem = sysctl_sctp_rmem, 7138 .sysctl_wmem = sysctl_sctp_wmem, 7139 .memory_pressure = &sctp_memory_pressure, 7140 .enter_memory_pressure = sctp_enter_memory_pressure, 7141 .memory_allocated = &sctp_memory_allocated, 7142 .sockets_allocated = &sctp_sockets_allocated, 7143 }; 7144 7145 #if IS_ENABLED(CONFIG_IPV6) 7146 7147 struct proto sctpv6_prot = { 7148 .name = "SCTPv6", 7149 .owner = THIS_MODULE, 7150 .close = sctp_close, 7151 .connect = sctp_connect, 7152 .disconnect = sctp_disconnect, 7153 .accept = sctp_accept, 7154 .ioctl = sctp_ioctl, 7155 .init = sctp_init_sock, 7156 .destroy = sctp_destroy_sock, 7157 .shutdown = sctp_shutdown, 7158 .setsockopt = sctp_setsockopt, 7159 .getsockopt = sctp_getsockopt, 7160 .sendmsg = sctp_sendmsg, 7161 .recvmsg = sctp_recvmsg, 7162 .bind = sctp_bind, 7163 .backlog_rcv = sctp_backlog_rcv, 7164 .hash = sctp_hash, 7165 .unhash = sctp_unhash, 7166 .get_port = sctp_get_port, 7167 .obj_size = sizeof(struct sctp6_sock), 7168 .sysctl_mem = sysctl_sctp_mem, 7169 .sysctl_rmem = sysctl_sctp_rmem, 7170 .sysctl_wmem = sysctl_sctp_wmem, 7171 .memory_pressure = &sctp_memory_pressure, 7172 .enter_memory_pressure = sctp_enter_memory_pressure, 7173 .memory_allocated = &sctp_memory_allocated, 7174 .sockets_allocated = &sctp_sockets_allocated, 7175 }; 7176 #endif /* IS_ENABLED(CONFIG_IPV6) */ 7177
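/*
 * Usage note (illustrative only, not part of the in-kernel interface):
 * the proto structures above are reached from user space through the
 * ordinary sockets API, e.g.
 *
 *	int one_to_one  = socket(AF_INET,  SOCK_STREAM,    IPPROTO_SCTP);
 *	int one_to_many = socket(AF_INET6, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 * AF_INET sockets resolve to sctp_prot and AF_INET6 sockets to
 * sctpv6_prot; SOCK_STREAM selects the one-to-one (TCP-style)
 * interface and SOCK_SEQPACKET the one-to-many (UDP-style) one.
 */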