/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal <narsi@refcode.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *    Anup Pemmaiah <pemmaiah@cc.usu.edu>
 *    Kevin Gao <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len, struct sock **orig_sk);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type);

static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
	skb_orphan(chunk->skb);
}

static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
				       void (*cb)(struct sctp_chunk *))

{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_transport *t;
	struct sctp_chunk *chunk;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
			cb(chunk);

	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->sacked, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->out_chunk_list, list)
		cb(chunk);
}

static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
				 void (*cb)(struct sk_buff *, struct sock *))

{
	struct sk_buff *skb, *tmp;

	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
		cb(skb, sk);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
517 */ 518 sa_addr = addr_buf; 519 af = sctp_get_af_specific(sa_addr->sa_family); 520 if (!af) { 521 retval = -EINVAL; 522 goto err_bindx_add; 523 } 524 525 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, 526 af->sockaddr_len); 527 528 addr_buf += af->sockaddr_len; 529 530 err_bindx_add: 531 if (retval < 0) { 532 /* Failed. Cleanup the ones that have been added */ 533 if (cnt > 0) 534 sctp_bindx_rem(sk, addrs, cnt); 535 return retval; 536 } 537 } 538 539 return retval; 540 } 541 542 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the 543 * associations that are part of the endpoint indicating that a list of local 544 * addresses are added to the endpoint. 545 * 546 * If any of the addresses is already in the bind address list of the 547 * association, we do not send the chunk for that association. But it will not 548 * affect other associations. 549 * 550 * Only sctp_setsockopt_bindx() is supposed to call this function. 551 */ 552 static int sctp_send_asconf_add_ip(struct sock *sk, 553 struct sockaddr *addrs, 554 int addrcnt) 555 { 556 struct net *net = sock_net(sk); 557 struct sctp_sock *sp; 558 struct sctp_endpoint *ep; 559 struct sctp_association *asoc; 560 struct sctp_bind_addr *bp; 561 struct sctp_chunk *chunk; 562 struct sctp_sockaddr_entry *laddr; 563 union sctp_addr *addr; 564 union sctp_addr saveaddr; 565 void *addr_buf; 566 struct sctp_af *af; 567 struct list_head *p; 568 int i; 569 int retval = 0; 570 571 if (!net->sctp.addip_enable) 572 return retval; 573 574 sp = sctp_sk(sk); 575 ep = sp->ep; 576 577 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 578 __func__, sk, addrs, addrcnt); 579 580 list_for_each_entry(asoc, &ep->asocs, asocs) { 581 if (!asoc->peer.asconf_capable) 582 continue; 583 584 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) 585 continue; 586 587 if (!sctp_state(asoc, ESTABLISHED)) 588 continue; 589 590 /* Check if any address in the packed array of addresses is 591 * in the bind address list of the association. If so, 592 * do not send the asconf chunk to its peer, but continue with 593 * other associations. 594 */ 595 addr_buf = addrs; 596 for (i = 0; i < addrcnt; i++) { 597 addr = addr_buf; 598 af = sctp_get_af_specific(addr->v4.sin_family); 599 if (!af) { 600 retval = -EINVAL; 601 goto out; 602 } 603 604 if (sctp_assoc_lookup_laddr(asoc, addr)) 605 break; 606 607 addr_buf += af->sockaddr_len; 608 } 609 if (i < addrcnt) 610 continue; 611 612 /* Use the first valid address in bind addr list of 613 * association as Address Parameter of ASCONF CHUNK. 614 */ 615 bp = &asoc->base.bind_addr; 616 p = bp->address_list.next; 617 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 618 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 619 addrcnt, SCTP_PARAM_ADD_IP); 620 if (!chunk) { 621 retval = -ENOMEM; 622 goto out; 623 } 624 625 /* Add the new addresses to the bind address list with 626 * use_as_src set to 0. 
627 */ 628 addr_buf = addrs; 629 for (i = 0; i < addrcnt; i++) { 630 addr = addr_buf; 631 af = sctp_get_af_specific(addr->v4.sin_family); 632 memcpy(&saveaddr, addr, af->sockaddr_len); 633 retval = sctp_add_bind_addr(bp, &saveaddr, 634 sizeof(saveaddr), 635 SCTP_ADDR_NEW, GFP_ATOMIC); 636 addr_buf += af->sockaddr_len; 637 } 638 if (asoc->src_out_of_asoc_ok) { 639 struct sctp_transport *trans; 640 641 list_for_each_entry(trans, 642 &asoc->peer.transport_addr_list, transports) { 643 /* Clear the source and route cache */ 644 sctp_transport_dst_release(trans); 645 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 646 2*asoc->pathmtu, 4380)); 647 trans->ssthresh = asoc->peer.i.a_rwnd; 648 trans->rto = asoc->rto_initial; 649 sctp_max_rto(asoc, trans); 650 trans->rtt = trans->srtt = trans->rttvar = 0; 651 sctp_transport_route(trans, NULL, 652 sctp_sk(asoc->base.sk)); 653 } 654 } 655 retval = sctp_send_asconf(asoc, chunk); 656 } 657 658 out: 659 return retval; 660 } 661 662 /* Remove a list of addresses from bind addresses list. Do not remove the 663 * last address. 664 * 665 * Basically run through each address specified in the addrs/addrcnt 666 * array/length pair, determine if it is IPv6 or IPv4 and call 667 * sctp_del_bind() on it. 668 * 669 * If any of them fails, then the operation will be reversed and the 670 * ones that were removed will be added back. 671 * 672 * At least one address has to be left; if only one address is 673 * available, the operation will return -EBUSY. 674 * 675 * Only sctp_setsockopt_bindx() is supposed to call this function. 676 */ 677 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) 678 { 679 struct sctp_sock *sp = sctp_sk(sk); 680 struct sctp_endpoint *ep = sp->ep; 681 int cnt; 682 struct sctp_bind_addr *bp = &ep->base.bind_addr; 683 int retval = 0; 684 void *addr_buf; 685 union sctp_addr *sa_addr; 686 struct sctp_af *af; 687 688 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", 689 __func__, sk, addrs, addrcnt); 690 691 addr_buf = addrs; 692 for (cnt = 0; cnt < addrcnt; cnt++) { 693 /* If the bind address list is empty or if there is only one 694 * bind address, there is nothing more to be removed (we need 695 * at least one address here). 696 */ 697 if (list_empty(&bp->address_list) || 698 (sctp_list_single_entry(&bp->address_list))) { 699 retval = -EBUSY; 700 goto err_bindx_rem; 701 } 702 703 sa_addr = addr_buf; 704 af = sctp_get_af_specific(sa_addr->sa.sa_family); 705 if (!af) { 706 retval = -EINVAL; 707 goto err_bindx_rem; 708 } 709 710 if (!af->addr_valid(sa_addr, sp, NULL)) { 711 retval = -EADDRNOTAVAIL; 712 goto err_bindx_rem; 713 } 714 715 if (sa_addr->v4.sin_port && 716 sa_addr->v4.sin_port != htons(bp->port)) { 717 retval = -EINVAL; 718 goto err_bindx_rem; 719 } 720 721 if (!sa_addr->v4.sin_port) 722 sa_addr->v4.sin_port = htons(bp->port); 723 724 /* FIXME - There is probably a need to check if sk->sk_saddr and 725 * sk->sk_rcv_addr are currently set to one of the addresses to 726 * be removed. This is something which needs to be looked into 727 * when we are fixing the outstanding issues with multi-homing 728 * socket routing and failover schemes. Refer to comments in 729 * sctp_do_bind(). -daisy 730 */ 731 retval = sctp_del_bind_addr(bp, sa_addr); 732 733 addr_buf += af->sockaddr_len; 734 err_bindx_rem: 735 if (retval < 0) { 736 /* Failed. 
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			sctp_transport_dst_release(transport);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(). Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with, so that no newly accepted association will
 * be associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
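/* For illustration only (not part of the kernel sources): a minimal
 * user-space sketch of the sctp_bindx() usage described above, assuming
 * the lksctp-tools library and <netinet/sctp.h> are available. The
 * function name, addresses and port below are made up, and error
 * handling is reduced to returning -1.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	int bindx_add_example(void)
 *	{
 *		struct sockaddr_in addr;
 *		int sd;
 *
 *		sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *		if (sd < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_port = htons(5000);
 *		addr.sin_addr.s_addr = inet_addr("192.0.2.1");
 *		if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *			return -1;
 *
 *		// Add a second local address; the port must match.
 *		addr.sin_addr.s_addr = inet_addr("198.51.100.1");
 *		return sctp_bindx(sd, (struct sockaddr *)&addr, 1,
 *				  SCTP_BINDX_ADD_ADDR);
 *	}
 */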
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}

/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	enum sctp_scope scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;
	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it. It is a no-op if it
		 * wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke __sctp_connect(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
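/* For illustration only (not part of the kernel sources): a minimal
 * user-space sketch of connecting to a multi-homed peer with the
 * sctp_connectx() call described above, assuming the lksctp-tools library
 * and <netinet/sctp.h> are available. The peer addresses and port are
 * made up, and error handling is omitted for brevity. The two
 * sockaddr_in entries form the "packed array" mentioned above.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	int connectx_example(void)
 *	{
 *		struct sockaddr_in peers[2];
 *		sctp_assoc_t asoc_id;
 *		int sd, i;
 *
 *		sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		if (sd < 0)
 *			return -1;
 *
 *		memset(peers, 0, sizeof(peers));
 *		for (i = 0; i < 2; i++) {
 *			peers[i].sin_family = AF_INET;
 *			peers[i].sin_port = htons(5000);
 *		}
 *		peers[0].sin_addr.s_addr = inet_addr("192.0.2.10");
 *		peers[1].sin_addr.s_addr = inet_addr("198.51.100.10");
 *
 *		return sctp_connectx(sd, (struct sockaddr *)peers, 2,
 *				     &asoc_id);
 *	}
 */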
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only difference is that we
 * store the actual length of the address buffer in the addrs_num
 * structure member. That way we can re-use the existing code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *    struct  linger {
 *       int     l_onoff;                // option on/off
 *       int     l_linger;               // linger time
 *    };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
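/* For illustration only (not part of the kernel sources): a hypothetical
 * user-space snippet showing the SO_LINGER behaviour described above on a
 * TCP-style (one-to-one) SCTP socket. With l_onoff = 1 and l_linger = 0,
 * the subsequent close() aborts the association instead of performing a
 * graceful shutdown. Error handling is omitted for brevity.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void abort_on_close(int sd)
 *	{
 *		struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *		setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *		close(sd);	// sends ABORT rather than SHUTDOWN
 *	}
 */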
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	sk->sk_shutdown = SHUTDOWN_MASK;
	inet_sk_set_state(sk, SCTP_SS_CLOSING);

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock_nested(sk);

	/* Hold the sock, since sk_common_release() will call sock_put()
	 * and we have just a little more cleanup to do.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
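/* For illustration only (not part of the kernel sources): a hypothetical
 * user-space sketch of the sendmsg() usage described above, attaching an
 * SCTP_SNDRCV ancillary message so the data goes out on a specific stream
 * of an existing association. The stream number and payload are made up,
 * and error handling is omitted for brevity.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	ssize_t send_on_stream(int sd, sctp_assoc_t assoc_id)
 *	{
 *		char payload[] = "hello";
 *		struct iovec iov = { .iov_base = payload,
 *				     .iov_len = sizeof(payload) };
 *		union {
 *			char buf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *		struct sctp_sndrcvinfo *sinfo;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(&u, 0, sizeof(u));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = u.buf;
 *		msg.msg_controllen = sizeof(u.buf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = IPPROTO_SCTP;
 *		cmsg->cmsg_type = SCTP_SNDRCV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(*sinfo));
 *		sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *		sinfo->sinfo_stream = 1;	// made-up stream number
 *		sinfo->sinfo_assoc_id = assoc_id;
 *
 *		return sendmsg(sd, &msg, 0);
 *	}
 */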
1639 * 1640 * Note: This function could use a rewrite especially when explicit 1641 * connect support comes in. 1642 */ 1643 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ 1644 1645 static int sctp_msghdr_parse(const struct msghdr *msg, 1646 struct sctp_cmsgs *cmsgs); 1647 1648 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) 1649 { 1650 struct net *net = sock_net(sk); 1651 struct sctp_sock *sp; 1652 struct sctp_endpoint *ep; 1653 struct sctp_association *new_asoc = NULL, *asoc = NULL; 1654 struct sctp_transport *transport, *chunk_tp; 1655 struct sctp_chunk *chunk; 1656 union sctp_addr to; 1657 struct sockaddr *msg_name = NULL; 1658 struct sctp_sndrcvinfo default_sinfo; 1659 struct sctp_sndrcvinfo *sinfo; 1660 struct sctp_initmsg *sinit; 1661 sctp_assoc_t associd = 0; 1662 struct sctp_cmsgs cmsgs = { NULL }; 1663 enum sctp_scope scope; 1664 bool fill_sinfo_ttl = false, wait_connect = false; 1665 struct sctp_datamsg *datamsg; 1666 int msg_flags = msg->msg_flags; 1667 __u16 sinfo_flags = 0; 1668 long timeo; 1669 int err; 1670 1671 err = 0; 1672 sp = sctp_sk(sk); 1673 ep = sp->ep; 1674 1675 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, 1676 msg, msg_len, ep); 1677 1678 /* We cannot send a message over a TCP-style listening socket. */ 1679 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { 1680 err = -EPIPE; 1681 goto out_nounlock; 1682 } 1683 1684 /* Parse out the SCTP CMSGs. */ 1685 err = sctp_msghdr_parse(msg, &cmsgs); 1686 if (err) { 1687 pr_debug("%s: msghdr parse err:%x\n", __func__, err); 1688 goto out_nounlock; 1689 } 1690 1691 /* Fetch the destination address for this packet. This 1692 * address only selects the association--it is not necessarily 1693 * the address we will send to. 1694 * For a peeled-off socket, msg_name is ignored. 1695 */ 1696 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { 1697 int msg_namelen = msg->msg_namelen; 1698 1699 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, 1700 msg_namelen); 1701 if (err) 1702 return err; 1703 1704 if (msg_namelen > sizeof(to)) 1705 msg_namelen = sizeof(to); 1706 memcpy(&to, msg->msg_name, msg_namelen); 1707 msg_name = msg->msg_name; 1708 } 1709 1710 sinit = cmsgs.init; 1711 if (cmsgs.sinfo != NULL) { 1712 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1713 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; 1714 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; 1715 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; 1716 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; 1717 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; 1718 1719 sinfo = &default_sinfo; 1720 fill_sinfo_ttl = true; 1721 } else { 1722 sinfo = cmsgs.srinfo; 1723 } 1724 /* Did the user specify SNDINFO/SNDRCVINFO? */ 1725 if (sinfo) { 1726 sinfo_flags = sinfo->sinfo_flags; 1727 associd = sinfo->sinfo_assoc_id; 1728 } 1729 1730 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, 1731 msg_len, sinfo_flags); 1732 1733 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ 1734 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { 1735 err = -EINVAL; 1736 goto out_nounlock; 1737 } 1738 1739 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero 1740 * length messages when SCTP_EOF|SCTP_ABORT is not set. 1741 * If SCTP_ABORT is set, the message length could be non zero with 1742 * the msg_iov set to the user abort reason. 
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);

		/* If we could not find a matching association on the
		 * endpoint, make sure that it is not a TCP-style
		 * socket that already has an association or there is
		 * no peeled-off association on another socket.
		 */
		if (!asoc &&
		    ((sctp_style(sk, TCP) &&
		      (sctp_sstate(sk, ESTABLISHED) ||
		       sctp_sstate(sk, CLOSING))) ||
		     sctp_endpoint_is_peeled_off(ep, &to))) {
			err = -EADDRNOTAVAIL;
			goto out_unlock;
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
1864 */ 1865 if (!ep->base.bind_addr.port) { 1866 if (sctp_autobind(sk)) { 1867 err = -EAGAIN; 1868 goto out_unlock; 1869 } 1870 } else { 1871 /* 1872 * If an unprivileged user inherits a one-to-many 1873 * style socket with open associations on a privileged 1874 * port, it MAY be permitted to accept new associations, 1875 * but it SHOULD NOT be permitted to open new 1876 * associations. 1877 */ 1878 if (ep->base.bind_addr.port < inet_prot_sock(net) && 1879 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { 1880 err = -EACCES; 1881 goto out_unlock; 1882 } 1883 } 1884 1885 scope = sctp_scope(&to); 1886 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); 1887 if (!new_asoc) { 1888 err = -ENOMEM; 1889 goto out_unlock; 1890 } 1891 asoc = new_asoc; 1892 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); 1893 if (err < 0) { 1894 err = -ENOMEM; 1895 goto out_free; 1896 } 1897 1898 /* If the SCTP_INIT ancillary data is specified, set all 1899 * the association init values accordingly. 1900 */ 1901 if (sinit) { 1902 if (sinit->sinit_num_ostreams) { 1903 asoc->c.sinit_num_ostreams = 1904 sinit->sinit_num_ostreams; 1905 } 1906 if (sinit->sinit_max_instreams) { 1907 asoc->c.sinit_max_instreams = 1908 sinit->sinit_max_instreams; 1909 } 1910 if (sinit->sinit_max_attempts) { 1911 asoc->max_init_attempts 1912 = sinit->sinit_max_attempts; 1913 } 1914 if (sinit->sinit_max_init_timeo) { 1915 asoc->max_init_timeo = 1916 msecs_to_jiffies(sinit->sinit_max_init_timeo); 1917 } 1918 } 1919 1920 /* Prime the peer's transport structures. */ 1921 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); 1922 if (!transport) { 1923 err = -ENOMEM; 1924 goto out_free; 1925 } 1926 } 1927 1928 /* ASSERT: we have a valid association at this point. */ 1929 pr_debug("%s: we have a valid association\n", __func__); 1930 1931 if (!sinfo) { 1932 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up 1933 * one with some defaults. 1934 */ 1935 memset(&default_sinfo, 0, sizeof(default_sinfo)); 1936 default_sinfo.sinfo_stream = asoc->default_stream; 1937 default_sinfo.sinfo_flags = asoc->default_flags; 1938 default_sinfo.sinfo_ppid = asoc->default_ppid; 1939 default_sinfo.sinfo_context = asoc->default_context; 1940 default_sinfo.sinfo_timetolive = asoc->default_timetolive; 1941 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); 1942 1943 sinfo = &default_sinfo; 1944 } else if (fill_sinfo_ttl) { 1945 /* In case SNDINFO was specified, we still need to fill 1946 * it with a default ttl from the assoc here. 1947 */ 1948 sinfo->sinfo_timetolive = asoc->default_timetolive; 1949 } 1950 1951 /* API 7.1.7, the sndbuf size per association bounds the 1952 * maximum size of data that can be sent in a single send call. 1953 */ 1954 if (msg_len > sk->sk_sndbuf) { 1955 err = -EMSGSIZE; 1956 goto out_free; 1957 } 1958 1959 if (asoc->pmtu_pending) 1960 sctp_assoc_pending_pmtu(asoc); 1961 1962 /* If fragmentation is disabled and the message length exceeds the 1963 * association fragmentation point, return EMSGSIZE. The I-D 1964 * does not specify what this error is, but this looks like 1965 * a great fit. 1966 */ 1967 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { 1968 err = -EMSGSIZE; 1969 goto out_free; 1970 } 1971 1972 /* Check for invalid stream. 
*/ 1973 if (sinfo->sinfo_stream >= asoc->stream.outcnt) { 1974 err = -EINVAL; 1975 goto out_free; 1976 } 1977 1978 /* Allocate sctp_stream_out_ext if not already done */ 1979 if (unlikely(!asoc->stream.out[sinfo->sinfo_stream].ext)) { 1980 err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream); 1981 if (err) 1982 goto out_free; 1983 } 1984 1985 if (sctp_wspace(asoc) < msg_len) 1986 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); 1987 1988 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1989 if (!sctp_wspace(asoc)) { 1990 /* sk can be changed by peel off when waiting for buf. */ 1991 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); 1992 if (err) { 1993 if (err == -ESRCH) { 1994 /* asoc is already dead. */ 1995 new_asoc = NULL; 1996 err = -EPIPE; 1997 } 1998 goto out_free; 1999 } 2000 } 2001 2002 /* If an address is passed with the sendto/sendmsg call, it is used 2003 * to override the primary destination address in the TCP model, or 2004 * when SCTP_ADDR_OVER flag is set in the UDP model. 2005 */ 2006 if ((sctp_style(sk, TCP) && msg_name) || 2007 (sinfo_flags & SCTP_ADDR_OVER)) { 2008 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); 2009 if (!chunk_tp) { 2010 err = -EINVAL; 2011 goto out_free; 2012 } 2013 } else 2014 chunk_tp = NULL; 2015 2016 /* Auto-connect, if we aren't connected already. */ 2017 if (sctp_state(asoc, CLOSED)) { 2018 err = sctp_primitive_ASSOCIATE(net, asoc, NULL); 2019 if (err < 0) 2020 goto out_free; 2021 2022 /* If stream interleave is enabled, wait_connect has to be 2023 * done earlier than data enqueue, as it needs to make data 2024 * or idata according to asoc->intl_enable which is set 2025 * after connection is done. 2026 */ 2027 if (sctp_sk(asoc->base.sk)->strm_interleave) { 2028 timeo = sock_sndtimeo(sk, 0); 2029 err = sctp_wait_for_connect(asoc, &timeo); 2030 if (err) 2031 goto out_unlock; 2032 } else { 2033 wait_connect = true; 2034 } 2035 2036 pr_debug("%s: we associated primitively\n", __func__); 2037 } 2038 2039 /* Break the message into multiple chunks of maximum size. */ 2040 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); 2041 if (IS_ERR(datamsg)) { 2042 err = PTR_ERR(datamsg); 2043 goto out_free; 2044 } 2045 asoc->force_delay = !!(msg->msg_flags & MSG_MORE); 2046 2047 /* Now send the (possibly) fragmented message. */ 2048 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 2049 sctp_chunk_hold(chunk); 2050 2051 /* Do accounting for the write space. */ 2052 sctp_set_owner_w(chunk); 2053 2054 chunk->transport = chunk_tp; 2055 } 2056 2057 /* Send it to the lower layers. Note: all chunks 2058 * must either fail or succeed. The lower layer 2059 * works that way today. Keep it that way or this 2060 * breaks. 2061 */ 2062 err = sctp_primitive_SEND(net, asoc, datamsg); 2063 /* Did the lower layer accept the chunk? */ 2064 if (err) { 2065 sctp_datamsg_free(datamsg); 2066 goto out_free; 2067 } 2068 2069 pr_debug("%s: we sent primitively\n", __func__); 2070 2071 sctp_datamsg_put(datamsg); 2072 err = msg_len; 2073 2074 if (unlikely(wait_connect)) { 2075 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); 2076 sctp_wait_for_connect(asoc, &timeo); 2077 } 2078 2079 /* If we are already past ASSOCIATE, the lower 2080 * layers are responsible for association cleanup. 
2081 */ 2082 goto out_unlock; 2083 2084 out_free: 2085 if (new_asoc) 2086 sctp_association_free(asoc); 2087 out_unlock: 2088 release_sock(sk); 2089 2090 out_nounlock: 2091 return sctp_error(sk, msg_flags, err); 2092 2093 #if 0 2094 do_sock_err: 2095 if (msg_len) 2096 err = msg_len; 2097 else 2098 err = sock_error(sk); 2099 goto out; 2100 2101 do_interrupted: 2102 if (msg_len) 2103 err = msg_len; 2104 goto out; 2105 #endif /* 0 */ 2106 } 2107 2108 /* This is an extended version of skb_pull() that removes the data from the 2109 * start of a skb even when data is spread across the list of skb's in the 2110 * frag_list. len specifies the total amount of data that needs to be removed. 2111 * when 'len' bytes could be removed from the skb, it returns 0. 2112 * If 'len' exceeds the total skb length, it returns the no. of bytes that 2113 * could not be removed. 2114 */ 2115 static int sctp_skb_pull(struct sk_buff *skb, int len) 2116 { 2117 struct sk_buff *list; 2118 int skb_len = skb_headlen(skb); 2119 int rlen; 2120 2121 if (len <= skb_len) { 2122 __skb_pull(skb, len); 2123 return 0; 2124 } 2125 len -= skb_len; 2126 __skb_pull(skb, skb_len); 2127 2128 skb_walk_frags(skb, list) { 2129 rlen = sctp_skb_pull(list, len); 2130 skb->len -= (len-rlen); 2131 skb->data_len -= (len-rlen); 2132 2133 if (!rlen) 2134 return 0; 2135 2136 len = rlen; 2137 } 2138 2139 return len; 2140 } 2141 2142 /* API 3.1.3 recvmsg() - UDP Style Syntax 2143 * 2144 * ssize_t recvmsg(int socket, struct msghdr *message, 2145 * int flags); 2146 * 2147 * socket - the socket descriptor of the endpoint. 2148 * message - pointer to the msghdr structure which contains a single 2149 * user message and possibly some ancillary data. 2150 * 2151 * See Section 5 for complete description of the data 2152 * structures. 2153 * 2154 * flags - flags sent or received with the user message, see Section 2155 * 5 for complete description of the flags. 2156 */ 2157 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 2158 int noblock, int flags, int *addr_len) 2159 { 2160 struct sctp_ulpevent *event = NULL; 2161 struct sctp_sock *sp = sctp_sk(sk); 2162 struct sk_buff *skb, *head_skb; 2163 int copied; 2164 int err = 0; 2165 int skb_len; 2166 2167 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " 2168 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, 2169 addr_len); 2170 2171 lock_sock(sk); 2172 2173 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) && 2174 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) { 2175 err = -ENOTCONN; 2176 goto out; 2177 } 2178 2179 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); 2180 if (!skb) 2181 goto out; 2182 2183 /* Get the total length of the skb including any skb's in the 2184 * frag_list. 2185 */ 2186 skb_len = skb->len; 2187 2188 copied = skb_len; 2189 if (copied > len) 2190 copied = len; 2191 2192 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2193 2194 event = sctp_skb2event(skb); 2195 2196 if (err) 2197 goto out_free; 2198 2199 if (event->chunk && event->chunk->head_skb) 2200 head_skb = event->chunk->head_skb; 2201 else 2202 head_skb = skb; 2203 sock_recv_ts_and_drops(msg, sk, head_skb); 2204 if (sctp_ulpevent_is_notification(event)) { 2205 msg->msg_flags |= MSG_NOTIFICATION; 2206 sp->pf->event_msgname(event, msg->msg_name, addr_len); 2207 } else { 2208 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len); 2209 } 2210 2211 /* Check if we allow SCTP_NXTINFO. 
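 * These ancillary blocks are only generated when the user has opted in with
 * the corresponding socket options; a hedged userspace sketch (the descriptor
 * name is a placeholder):
 *
 *   int on = 1;
 *   setsockopt(fd, SOL_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));
 *   setsockopt(fd, SOL_SCTP, SCTP_RECVNXTINFO, &on, sizeof(on));
 *   // recvmsg() will then attach SCTP_RCVINFO/SCTP_NXTINFO cmsgs.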
*/ 2212 if (sp->recvnxtinfo) 2213 sctp_ulpevent_read_nxtinfo(event, msg, sk); 2214 /* Check if we allow SCTP_RCVINFO. */ 2215 if (sp->recvrcvinfo) 2216 sctp_ulpevent_read_rcvinfo(event, msg); 2217 /* Check if we allow SCTP_SNDRCVINFO. */ 2218 if (sp->subscribe.sctp_data_io_event) 2219 sctp_ulpevent_read_sndrcvinfo(event, msg); 2220 2221 err = copied; 2222 2223 /* If skb's length exceeds the user's buffer, update the skb and 2224 * push it back to the receive_queue so that the next call to 2225 * recvmsg() will return the remaining data. Don't set MSG_EOR. 2226 */ 2227 if (skb_len > copied) { 2228 msg->msg_flags &= ~MSG_EOR; 2229 if (flags & MSG_PEEK) 2230 goto out_free; 2231 sctp_skb_pull(skb, copied); 2232 skb_queue_head(&sk->sk_receive_queue, skb); 2233 2234 /* When only partial message is copied to the user, increase 2235 * rwnd by that amount. If all the data in the skb is read, 2236 * rwnd is updated when the event is freed. 2237 */ 2238 if (!sctp_ulpevent_is_notification(event)) 2239 sctp_assoc_rwnd_increase(event->asoc, copied); 2240 goto out; 2241 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2242 (event->msg_flags & MSG_EOR)) 2243 msg->msg_flags |= MSG_EOR; 2244 else 2245 msg->msg_flags &= ~MSG_EOR; 2246 2247 out_free: 2248 if (flags & MSG_PEEK) { 2249 /* Release the skb reference acquired after peeking the skb in 2250 * sctp_skb_recv_datagram(). 2251 */ 2252 kfree_skb(skb); 2253 } else { 2254 /* Free the event which includes releasing the reference to 2255 * the owner of the skb, freeing the skb and updating the 2256 * rwnd. 2257 */ 2258 sctp_ulpevent_free(event); 2259 } 2260 out: 2261 release_sock(sk); 2262 return err; 2263 } 2264 2265 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 2266 * 2267 * This option is a on/off flag. If enabled no SCTP message 2268 * fragmentation will be performed. Instead if a message being sent 2269 * exceeds the current PMTU size, the message will NOT be sent and 2270 * instead a error will be indicated to the user. 2271 */ 2272 static int sctp_setsockopt_disable_fragments(struct sock *sk, 2273 char __user *optval, 2274 unsigned int optlen) 2275 { 2276 int val; 2277 2278 if (optlen < sizeof(int)) 2279 return -EINVAL; 2280 2281 if (get_user(val, (int __user *)optval)) 2282 return -EFAULT; 2283 2284 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; 2285 2286 return 0; 2287 } 2288 2289 static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2290 unsigned int optlen) 2291 { 2292 struct sctp_association *asoc; 2293 struct sctp_ulpevent *event; 2294 2295 if (optlen > sizeof(struct sctp_event_subscribe)) 2296 return -EINVAL; 2297 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2298 return -EFAULT; 2299 2300 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2301 * if there is no data to be sent or retransmit, the stack will 2302 * immediately send up this notification. 2303 */ 2304 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, 2305 &sctp_sk(sk)->subscribe)) { 2306 asoc = sctp_id2assoc(sk, 0); 2307 2308 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2309 event = sctp_ulpevent_make_sender_dry_event(asoc, 2310 GFP_ATOMIC); 2311 if (!event) 2312 return -ENOMEM; 2313 2314 asoc->stream.si->enqueue_event(&asoc->ulpq, event); 2315 } 2316 } 2317 2318 return 0; 2319 } 2320 2321 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 2322 * 2323 * This socket option is applicable to the UDP-style socket only. 
When 2324 * set it will cause associations that are idle for more than the 2325 * specified number of seconds to automatically close. An association 2326 * being idle is defined an association that has NOT sent or received 2327 * user data. The special value of '0' indicates that no automatic 2328 * close of any associations should be performed. The option expects an 2329 * integer defining the number of seconds of idle time before an 2330 * association is closed. 2331 */ 2332 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2333 unsigned int optlen) 2334 { 2335 struct sctp_sock *sp = sctp_sk(sk); 2336 struct net *net = sock_net(sk); 2337 2338 /* Applicable to UDP-style socket only */ 2339 if (sctp_style(sk, TCP)) 2340 return -EOPNOTSUPP; 2341 if (optlen != sizeof(int)) 2342 return -EINVAL; 2343 if (copy_from_user(&sp->autoclose, optval, optlen)) 2344 return -EFAULT; 2345 2346 if (sp->autoclose > net->sctp.max_autoclose) 2347 sp->autoclose = net->sctp.max_autoclose; 2348 2349 return 0; 2350 } 2351 2352 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 2353 * 2354 * Applications can enable or disable heartbeats for any peer address of 2355 * an association, modify an address's heartbeat interval, force a 2356 * heartbeat to be sent immediately, and adjust the address's maximum 2357 * number of retransmissions sent before an address is considered 2358 * unreachable. The following structure is used to access and modify an 2359 * address's parameters: 2360 * 2361 * struct sctp_paddrparams { 2362 * sctp_assoc_t spp_assoc_id; 2363 * struct sockaddr_storage spp_address; 2364 * uint32_t spp_hbinterval; 2365 * uint16_t spp_pathmaxrxt; 2366 * uint32_t spp_pathmtu; 2367 * uint32_t spp_sackdelay; 2368 * uint32_t spp_flags; 2369 * }; 2370 * 2371 * spp_assoc_id - (one-to-many style socket) This is filled in the 2372 * application, and identifies the association for 2373 * this query. 2374 * spp_address - This specifies which address is of interest. 2375 * spp_hbinterval - This contains the value of the heartbeat interval, 2376 * in milliseconds. If a value of zero 2377 * is present in this field then no changes are to 2378 * be made to this parameter. 2379 * spp_pathmaxrxt - This contains the maximum number of 2380 * retransmissions before this address shall be 2381 * considered unreachable. If a value of zero 2382 * is present in this field then no changes are to 2383 * be made to this parameter. 2384 * spp_pathmtu - When Path MTU discovery is disabled the value 2385 * specified here will be the "fixed" path mtu. 2386 * Note that if the spp_address field is empty 2387 * then all associations on this address will 2388 * have this fixed path mtu set upon them. 2389 * 2390 * spp_sackdelay - When delayed sack is enabled, this value specifies 2391 * the number of milliseconds that sacks will be delayed 2392 * for. This value will apply to all addresses of an 2393 * association if the spp_address field is empty. Note 2394 * also, that if delayed sack is enabled and this 2395 * value is set to 0, no change is made to the last 2396 * recorded delayed sack timer value. 2397 * 2398 * spp_flags - These flags are used to control various features 2399 * on an association. The flag field may contain 2400 * zero or more of the following options. 2401 * 2402 * SPP_HB_ENABLE - Enable heartbeats on the 2403 * specified address. Note that if the address 2404 * field is empty all addresses for the association 2405 * have heartbeats enabled upon them. 
2406 * 2407 * SPP_HB_DISABLE - Disable heartbeats on the 2408 * specified address. Note that if the address 2409 * field is empty all addresses for the association 2410 * will have their heartbeats disabled. Note also 2411 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 2412 * mutually exclusive, only one of these two should 2413 * be specified. Enabling both fields will have 2414 * undetermined results. 2415 * 2416 * SPP_HB_DEMAND - Request a user-initiated heartbeat 2417 * to be made immediately. 2418 * 2419 * SPP_HB_TIME_IS_ZERO - Specifies that the time for 2420 * heartbeat delay is to be set to the value of 0 2421 * milliseconds. 2422 * 2423 * SPP_PMTUD_ENABLE - This field will enable PMTU 2424 * discovery upon the specified address. Note that 2425 * if the address field is empty then all addresses 2426 * on the association are affected. 2427 * 2428 * SPP_PMTUD_DISABLE - This field will disable PMTU 2429 * discovery upon the specified address. Note that 2430 * if the address field is empty then all addresses 2431 * on the association are affected. Note also that 2432 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 2433 * exclusive. Enabling both will have undetermined 2434 * results. 2435 * 2436 * SPP_SACKDELAY_ENABLE - Setting this flag turns 2437 * on delayed sack. The time specified in spp_sackdelay 2438 * is used to specify the sack delay for this address. Note 2439 * that if spp_address is empty then all addresses will 2440 * enable delayed sack and take on the sack delay 2441 * value specified in spp_sackdelay. 2442 * SPP_SACKDELAY_DISABLE - Setting this flag turns 2443 * off delayed sack. If the spp_address field is blank then 2444 * delayed sack is disabled for the entire association. Note 2445 * also that this field is mutually exclusive to 2446 * SPP_SACKDELAY_ENABLE, setting both will have undefined 2447 * results. 2448 */ 2449 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, 2450 struct sctp_transport *trans, 2451 struct sctp_association *asoc, 2452 struct sctp_sock *sp, 2453 int hb_change, 2454 int pmtud_change, 2455 int sackdelay_change) 2456 { 2457 int error; 2458 2459 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2460 struct net *net = sock_net(trans->asoc->base.sk); 2461 2462 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2463 if (error) 2464 return error; 2465 } 2466 2467 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of 2468 * this field is ignored. Note also that a value of zero indicates 2469 * the current setting should be left unchanged. 2470 */ 2471 if (params->spp_flags & SPP_HB_ENABLE) { 2472 2473 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is 2474 * set. This lets us use a 0 value when this flag 2475 * is set.
2476 */ 2477 if (params->spp_flags & SPP_HB_TIME_IS_ZERO) 2478 params->spp_hbinterval = 0; 2479 2480 if (params->spp_hbinterval || 2481 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { 2482 if (trans) { 2483 trans->hbinterval = 2484 msecs_to_jiffies(params->spp_hbinterval); 2485 } else if (asoc) { 2486 asoc->hbinterval = 2487 msecs_to_jiffies(params->spp_hbinterval); 2488 } else { 2489 sp->hbinterval = params->spp_hbinterval; 2490 } 2491 } 2492 } 2493 2494 if (hb_change) { 2495 if (trans) { 2496 trans->param_flags = 2497 (trans->param_flags & ~SPP_HB) | hb_change; 2498 } else if (asoc) { 2499 asoc->param_flags = 2500 (asoc->param_flags & ~SPP_HB) | hb_change; 2501 } else { 2502 sp->param_flags = 2503 (sp->param_flags & ~SPP_HB) | hb_change; 2504 } 2505 } 2506 2507 /* When Path MTU discovery is disabled the value specified here will 2508 * be the "fixed" path mtu (i.e. the value of the spp_flags field must 2509 * include the flag SPP_PMTUD_DISABLE for this field to have any 2510 * effect). 2511 */ 2512 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2513 if (trans) { 2514 trans->pathmtu = params->spp_pathmtu; 2515 sctp_assoc_sync_pmtu(asoc); 2516 } else if (asoc) { 2517 asoc->pathmtu = params->spp_pathmtu; 2518 } else { 2519 sp->pathmtu = params->spp_pathmtu; 2520 } 2521 } 2522 2523 if (pmtud_change) { 2524 if (trans) { 2525 int update = (trans->param_flags & SPP_PMTUD_DISABLE) && 2526 (params->spp_flags & SPP_PMTUD_ENABLE); 2527 trans->param_flags = 2528 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2529 if (update) { 2530 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2531 sctp_assoc_sync_pmtu(asoc); 2532 } 2533 } else if (asoc) { 2534 asoc->param_flags = 2535 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; 2536 } else { 2537 sp->param_flags = 2538 (sp->param_flags & ~SPP_PMTUD) | pmtud_change; 2539 } 2540 } 2541 2542 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the 2543 * value of this field is ignored. Note also that a value of zero 2544 * indicates the current setting should be left unchanged. 2545 */ 2546 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { 2547 if (trans) { 2548 trans->sackdelay = 2549 msecs_to_jiffies(params->spp_sackdelay); 2550 } else if (asoc) { 2551 asoc->sackdelay = 2552 msecs_to_jiffies(params->spp_sackdelay); 2553 } else { 2554 sp->sackdelay = params->spp_sackdelay; 2555 } 2556 } 2557 2558 if (sackdelay_change) { 2559 if (trans) { 2560 trans->param_flags = 2561 (trans->param_flags & ~SPP_SACKDELAY) | 2562 sackdelay_change; 2563 } else if (asoc) { 2564 asoc->param_flags = 2565 (asoc->param_flags & ~SPP_SACKDELAY) | 2566 sackdelay_change; 2567 } else { 2568 sp->param_flags = 2569 (sp->param_flags & ~SPP_SACKDELAY) | 2570 sackdelay_change; 2571 } 2572 } 2573 2574 /* Note that a value of zero indicates the current setting should be 2575 left unchanged. 
2576 */ 2577 if (params->spp_pathmaxrxt) { 2578 if (trans) { 2579 trans->pathmaxrxt = params->spp_pathmaxrxt; 2580 } else if (asoc) { 2581 asoc->pathmaxrxt = params->spp_pathmaxrxt; 2582 } else { 2583 sp->pathmaxrxt = params->spp_pathmaxrxt; 2584 } 2585 } 2586 2587 return 0; 2588 } 2589 2590 static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2591 char __user *optval, 2592 unsigned int optlen) 2593 { 2594 struct sctp_paddrparams params; 2595 struct sctp_transport *trans = NULL; 2596 struct sctp_association *asoc = NULL; 2597 struct sctp_sock *sp = sctp_sk(sk); 2598 int error; 2599 int hb_change, pmtud_change, sackdelay_change; 2600 2601 if (optlen != sizeof(struct sctp_paddrparams)) 2602 return -EINVAL; 2603 2604 if (copy_from_user(&params, optval, optlen)) 2605 return -EFAULT; 2606 2607 /* Validate flags and value parameters. */ 2608 hb_change = params.spp_flags & SPP_HB; 2609 pmtud_change = params.spp_flags & SPP_PMTUD; 2610 sackdelay_change = params.spp_flags & SPP_SACKDELAY; 2611 2612 if (hb_change == SPP_HB || 2613 pmtud_change == SPP_PMTUD || 2614 sackdelay_change == SPP_SACKDELAY || 2615 params.spp_sackdelay > 500 || 2616 (params.spp_pathmtu && 2617 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) 2618 return -EINVAL; 2619 2620 /* If an address other than INADDR_ANY is specified, and 2621 * no transport is found, then the request is invalid. 2622 */ 2623 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 2624 trans = sctp_addr_id2transport(sk, &params.spp_address, 2625 params.spp_assoc_id); 2626 if (!trans) 2627 return -EINVAL; 2628 } 2629 2630 /* Get association, if assoc_id != 0 and the socket is a one 2631 * to many style socket, and an association was not found, then 2632 * the id was invalid. 2633 */ 2634 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 2635 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) 2636 return -EINVAL; 2637 2638 /* Heartbeat demand can only be sent on a transport or 2639 * association, but not a socket. 2640 */ 2641 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) 2642 return -EINVAL; 2643 2644 /* Process parameters. */ 2645 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2646 hb_change, pmtud_change, 2647 sackdelay_change); 2648 2649 if (error) 2650 return error; 2651 2652 /* If changes are for association, also apply parameters to each 2653 * transport. 2654 */ 2655 if (!trans && asoc) { 2656 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2657 transports) { 2658 sctp_apply_peer_addr_params(&params, trans, asoc, sp, 2659 hb_change, pmtud_change, 2660 sackdelay_change); 2661 } 2662 } 2663 2664 return 0; 2665 } 2666 2667 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) 2668 { 2669 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; 2670 } 2671 2672 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) 2673 { 2674 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; 2675 } 2676 2677 /* 2678 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 2679 * 2680 * This option will affect the way delayed acks are performed. This 2681 * option allows you to get or set the delayed ack time, in 2682 * milliseconds. It also allows changing the delayed ack frequency. 2683 * Changing the frequency to 1 disables the delayed sack algorithm. If 2684 * the assoc_id is 0, then this sets or gets the endpoint's default 2685 * values.
If the assoc_id field is non-zero, then the set or get 2686 * affects the specified association for the one to many model (the 2687 * assoc_id field is ignored by the one to one model). Note that if 2688 * sack_delay or sack_freq are 0 when setting this option, then the 2689 * current values will remain unchanged. 2690 * 2691 * struct sctp_sack_info { 2692 * sctp_assoc_t sack_assoc_id; 2693 * uint32_t sack_delay; 2694 * uint32_t sack_freq; 2695 * }; 2696 * 2697 * sack_assoc_id - This parameter indicates which association the user 2698 * is performing an action upon. Note that if this field's value is 2699 * zero then the endpoint's default value is changed (affecting future 2700 * associations only). 2701 * 2702 * sack_delay - This parameter contains the number of milliseconds that 2703 * the user is requesting the delayed ACK timer be set to. Note that 2704 * this value is defined in the standard to be between 200 and 500 2705 * milliseconds. 2706 * 2707 * sack_freq - This parameter contains the number of packets that must 2708 * be received before a sack is sent without waiting for the delay 2709 * timer to expire. The default value for this is 2, setting this 2710 * value to 1 will disable the delayed sack algorithm. 2711 */ 2712 2713 static int sctp_setsockopt_delayed_ack(struct sock *sk, 2714 char __user *optval, unsigned int optlen) 2715 { 2716 struct sctp_sack_info params; 2717 struct sctp_transport *trans = NULL; 2718 struct sctp_association *asoc = NULL; 2719 struct sctp_sock *sp = sctp_sk(sk); 2720 2721 if (optlen == sizeof(struct sctp_sack_info)) { 2722 if (copy_from_user(&params, optval, optlen)) 2723 return -EFAULT; 2724 2725 if (params.sack_delay == 0 && params.sack_freq == 0) 2726 return 0; 2727 } else if (optlen == sizeof(struct sctp_assoc_value)) { 2728 pr_warn_ratelimited(DEPRECATED 2729 "%s (pid %d) " 2730 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 2731 "Use struct sctp_sack_info instead\n", 2732 current->comm, task_pid_nr(current)); 2733 if (copy_from_user(&params, optval, optlen)) 2734 return -EFAULT; 2735 2736 if (params.sack_delay == 0) 2737 params.sack_freq = 1; 2738 else 2739 params.sack_freq = 0; 2740 } else 2741 return -EINVAL; 2742 2743 /* Validate value parameter. */ 2744 if (params.sack_delay > 500) 2745 return -EINVAL; 2746 2747 /* Get association, if sack_assoc_id != 0 and the socket is a one 2748 * to many style socket, and an association was not found, then 2749 * the id was invalid.
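 *
 * For reference, a minimal, illustrative (non-normative) userspace use of
 * this option with the preferred structure; 'fd' is a placeholder:
 *
 *   struct sctp_sack_info si = {
 *           .sack_assoc_id = 0,     // endpoint default on a 1-to-many socket
 *           .sack_delay    = 300,   // milliseconds, 0 = leave unchanged
 *           .sack_freq     = 2,     // packets, 0 = leave unchanged
 *   };
 *   setsockopt(fd, SOL_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));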
2750 */ 2751 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 2752 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 2753 return -EINVAL; 2754 2755 if (params.sack_delay) { 2756 if (asoc) { 2757 asoc->sackdelay = 2758 msecs_to_jiffies(params.sack_delay); 2759 asoc->param_flags = 2760 sctp_spp_sackdelay_enable(asoc->param_flags); 2761 } else { 2762 sp->sackdelay = params.sack_delay; 2763 sp->param_flags = 2764 sctp_spp_sackdelay_enable(sp->param_flags); 2765 } 2766 } 2767 2768 if (params.sack_freq == 1) { 2769 if (asoc) { 2770 asoc->param_flags = 2771 sctp_spp_sackdelay_disable(asoc->param_flags); 2772 } else { 2773 sp->param_flags = 2774 sctp_spp_sackdelay_disable(sp->param_flags); 2775 } 2776 } else if (params.sack_freq > 1) { 2777 if (asoc) { 2778 asoc->sackfreq = params.sack_freq; 2779 asoc->param_flags = 2780 sctp_spp_sackdelay_enable(asoc->param_flags); 2781 } else { 2782 sp->sackfreq = params.sack_freq; 2783 sp->param_flags = 2784 sctp_spp_sackdelay_enable(sp->param_flags); 2785 } 2786 } 2787 2788 /* If change is for association, also apply to each transport. */ 2789 if (asoc) { 2790 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2791 transports) { 2792 if (params.sack_delay) { 2793 trans->sackdelay = 2794 msecs_to_jiffies(params.sack_delay); 2795 trans->param_flags = 2796 sctp_spp_sackdelay_enable(trans->param_flags); 2797 } 2798 if (params.sack_freq == 1) { 2799 trans->param_flags = 2800 sctp_spp_sackdelay_disable(trans->param_flags); 2801 } else if (params.sack_freq > 1) { 2802 trans->sackfreq = params.sack_freq; 2803 trans->param_flags = 2804 sctp_spp_sackdelay_enable(trans->param_flags); 2805 } 2806 } 2807 } 2808 2809 return 0; 2810 } 2811 2812 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 2813 * 2814 * Applications can specify protocol parameters for the default association 2815 * initialization. The option name argument to setsockopt() and getsockopt() 2816 * is SCTP_INITMSG. 2817 * 2818 * Setting initialization parameters is effective only on an unconnected 2819 * socket (for UDP-style sockets only future associations are effected 2820 * by the change). With TCP-style sockets, this option is inherited by 2821 * sockets derived from a listener socket. 2822 */ 2823 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) 2824 { 2825 struct sctp_initmsg sinit; 2826 struct sctp_sock *sp = sctp_sk(sk); 2827 2828 if (optlen != sizeof(struct sctp_initmsg)) 2829 return -EINVAL; 2830 if (copy_from_user(&sinit, optval, optlen)) 2831 return -EFAULT; 2832 2833 if (sinit.sinit_num_ostreams) 2834 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; 2835 if (sinit.sinit_max_instreams) 2836 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; 2837 if (sinit.sinit_max_attempts) 2838 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; 2839 if (sinit.sinit_max_init_timeo) 2840 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; 2841 2842 return 0; 2843 } 2844 2845 /* 2846 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) 2847 * 2848 * Applications that wish to use the sendto() system call may wish to 2849 * specify a default set of parameters that would normally be supplied 2850 * through the inclusion of ancillary data. This socket option allows 2851 * such an application to set the default sctp_sndrcvinfo structure. 
2852 * The application that wishes to use this socket option simply passes 2853 * in to this call the sctp_sndrcvinfo structure defined in Section 2854 * 5.2.2) The input parameters accepted by this call include 2855 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, 2856 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in 2857 * to this call if the caller is using the UDP model. 2858 */ 2859 static int sctp_setsockopt_default_send_param(struct sock *sk, 2860 char __user *optval, 2861 unsigned int optlen) 2862 { 2863 struct sctp_sock *sp = sctp_sk(sk); 2864 struct sctp_association *asoc; 2865 struct sctp_sndrcvinfo info; 2866 2867 if (optlen != sizeof(info)) 2868 return -EINVAL; 2869 if (copy_from_user(&info, optval, optlen)) 2870 return -EFAULT; 2871 if (info.sinfo_flags & 2872 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2873 SCTP_ABORT | SCTP_EOF)) 2874 return -EINVAL; 2875 2876 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 2877 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 2878 return -EINVAL; 2879 if (asoc) { 2880 asoc->default_stream = info.sinfo_stream; 2881 asoc->default_flags = info.sinfo_flags; 2882 asoc->default_ppid = info.sinfo_ppid; 2883 asoc->default_context = info.sinfo_context; 2884 asoc->default_timetolive = info.sinfo_timetolive; 2885 } else { 2886 sp->default_stream = info.sinfo_stream; 2887 sp->default_flags = info.sinfo_flags; 2888 sp->default_ppid = info.sinfo_ppid; 2889 sp->default_context = info.sinfo_context; 2890 sp->default_timetolive = info.sinfo_timetolive; 2891 } 2892 2893 return 0; 2894 } 2895 2896 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters 2897 * (SCTP_DEFAULT_SNDINFO) 2898 */ 2899 static int sctp_setsockopt_default_sndinfo(struct sock *sk, 2900 char __user *optval, 2901 unsigned int optlen) 2902 { 2903 struct sctp_sock *sp = sctp_sk(sk); 2904 struct sctp_association *asoc; 2905 struct sctp_sndinfo info; 2906 2907 if (optlen != sizeof(info)) 2908 return -EINVAL; 2909 if (copy_from_user(&info, optval, optlen)) 2910 return -EFAULT; 2911 if (info.snd_flags & 2912 ~(SCTP_UNORDERED | SCTP_ADDR_OVER | 2913 SCTP_ABORT | SCTP_EOF)) 2914 return -EINVAL; 2915 2916 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 2917 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 2918 return -EINVAL; 2919 if (asoc) { 2920 asoc->default_stream = info.snd_sid; 2921 asoc->default_flags = info.snd_flags; 2922 asoc->default_ppid = info.snd_ppid; 2923 asoc->default_context = info.snd_context; 2924 } else { 2925 sp->default_stream = info.snd_sid; 2926 sp->default_flags = info.snd_flags; 2927 sp->default_ppid = info.snd_ppid; 2928 sp->default_context = info.snd_context; 2929 } 2930 2931 return 0; 2932 } 2933 2934 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 2935 * 2936 * Requests that the local SCTP stack use the enclosed peer address as 2937 * the association primary. The enclosed address must be one of the 2938 * association peer's addresses. 
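 *
 * Illustrative (non-normative) userspace call; the descriptor, association
 * id and peer address are placeholders:
 *
 *   struct sctp_prim prim = { .ssp_assoc_id = assoc_id };
 *   memcpy(&prim.ssp_addr, &peer_addr, sizeof(peer_addr));
 *   setsockopt(fd, SOL_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));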
2939 */ 2940 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2941 unsigned int optlen) 2942 { 2943 struct sctp_prim prim; 2944 struct sctp_transport *trans; 2945 2946 if (optlen != sizeof(struct sctp_prim)) 2947 return -EINVAL; 2948 2949 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 2950 return -EFAULT; 2951 2952 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); 2953 if (!trans) 2954 return -EINVAL; 2955 2956 sctp_assoc_set_primary(trans->asoc, trans); 2957 2958 return 0; 2959 } 2960 2961 /* 2962 * 7.1.5 SCTP_NODELAY 2963 * 2964 * Turn on/off any Nagle-like algorithm. This means that packets are 2965 * generally sent as soon as possible and no unnecessary delays are 2966 * introduced, at the cost of more packets in the network. Expects an 2967 * integer boolean flag. 2968 */ 2969 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2970 unsigned int optlen) 2971 { 2972 int val; 2973 2974 if (optlen < sizeof(int)) 2975 return -EINVAL; 2976 if (get_user(val, (int __user *)optval)) 2977 return -EFAULT; 2978 2979 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; 2980 return 0; 2981 } 2982 2983 /* 2984 * 2985 * 7.1.1 SCTP_RTOINFO 2986 * 2987 * The protocol parameters used to initialize and bound retransmission 2988 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 2989 * and modify these parameters. 2990 * All parameters are time values, in milliseconds. A value of 0, when 2991 * modifying the parameters, indicates that the current value should not 2992 * be changed. 2993 * 2994 */ 2995 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) 2996 { 2997 struct sctp_rtoinfo rtoinfo; 2998 struct sctp_association *asoc; 2999 unsigned long rto_min, rto_max; 3000 struct sctp_sock *sp = sctp_sk(sk); 3001 3002 if (optlen != sizeof (struct sctp_rtoinfo)) 3003 return -EINVAL; 3004 3005 if (copy_from_user(&rtoinfo, optval, optlen)) 3006 return -EFAULT; 3007 3008 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 3009 3010 /* Set the values to the specific association */ 3011 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 3012 return -EINVAL; 3013 3014 rto_max = rtoinfo.srto_max; 3015 rto_min = rtoinfo.srto_min; 3016 3017 if (rto_max) 3018 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; 3019 else 3020 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; 3021 3022 if (rto_min) 3023 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; 3024 else 3025 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min; 3026 3027 if (rto_min > rto_max) 3028 return -EINVAL; 3029 3030 if (asoc) { 3031 if (rtoinfo.srto_initial != 0) 3032 asoc->rto_initial = 3033 msecs_to_jiffies(rtoinfo.srto_initial); 3034 asoc->rto_max = rto_max; 3035 asoc->rto_min = rto_min; 3036 } else { 3037 /* If there is no association or the association-id = 0 3038 * set the values to the endpoint. 3039 */ 3040 if (rtoinfo.srto_initial != 0) 3041 sp->rtoinfo.srto_initial = rtoinfo.srto_initial; 3042 sp->rtoinfo.srto_max = rto_max; 3043 sp->rtoinfo.srto_min = rto_min; 3044 } 3045 3046 return 0; 3047 } 3048 3049 /* 3050 * 3051 * 7.1.2 SCTP_ASSOCINFO 3052 * 3053 * This option is used to tune the maximum retransmission attempts 3054 * of the association. 3055 * Returns an error if the new association retransmission value is 3056 * greater than the sum of the retransmission value of the peer. 3057 * See [SCTP] for more information. 
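 *
 * Illustrative (non-normative) userspace use; the values are placeholders
 * and a zero field leaves the corresponding parameter unchanged:
 *
 *   struct sctp_assocparams ap = {
 *           .sasoc_assoc_id    = assoc_id,
 *           .sasoc_asocmaxrxt  = 5,
 *           .sasoc_cookie_life = 60000,   // milliseconds
 *   };
 *   setsockopt(fd, SOL_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));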
3058 * 3059 */ 3060 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) 3061 { 3062 3063 struct sctp_assocparams assocparams; 3064 struct sctp_association *asoc; 3065 3066 if (optlen != sizeof(struct sctp_assocparams)) 3067 return -EINVAL; 3068 if (copy_from_user(&assocparams, optval, optlen)) 3069 return -EFAULT; 3070 3071 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 3072 3073 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 3074 return -EINVAL; 3075 3076 /* Set the values to the specific association */ 3077 if (asoc) { 3078 if (assocparams.sasoc_asocmaxrxt != 0) { 3079 __u32 path_sum = 0; 3080 int paths = 0; 3081 struct sctp_transport *peer_addr; 3082 3083 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, 3084 transports) { 3085 path_sum += peer_addr->pathmaxrxt; 3086 paths++; 3087 } 3088 3089 /* Only validate asocmaxrxt if we have more than 3090 * one path/transport. We do this because path 3091 * retransmissions are only counted when we have more 3092 * then one path. 3093 */ 3094 if (paths > 1 && 3095 assocparams.sasoc_asocmaxrxt > path_sum) 3096 return -EINVAL; 3097 3098 asoc->max_retrans = assocparams.sasoc_asocmaxrxt; 3099 } 3100 3101 if (assocparams.sasoc_cookie_life != 0) 3102 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); 3103 } else { 3104 /* Set the values to the endpoint */ 3105 struct sctp_sock *sp = sctp_sk(sk); 3106 3107 if (assocparams.sasoc_asocmaxrxt != 0) 3108 sp->assocparams.sasoc_asocmaxrxt = 3109 assocparams.sasoc_asocmaxrxt; 3110 if (assocparams.sasoc_cookie_life != 0) 3111 sp->assocparams.sasoc_cookie_life = 3112 assocparams.sasoc_cookie_life; 3113 } 3114 return 0; 3115 } 3116 3117 /* 3118 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 3119 * 3120 * This socket option is a boolean flag which turns on or off mapped V4 3121 * addresses. If this option is turned on and the socket is type 3122 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 3123 * If this option is turned off, then no mapping will be done of V4 3124 * addresses and a user will receive both PF_INET6 and PF_INET type 3125 * addresses on the socket. 3126 */ 3127 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) 3128 { 3129 int val; 3130 struct sctp_sock *sp = sctp_sk(sk); 3131 3132 if (optlen < sizeof(int)) 3133 return -EINVAL; 3134 if (get_user(val, (int __user *)optval)) 3135 return -EFAULT; 3136 if (val) 3137 sp->v4mapped = 1; 3138 else 3139 sp->v4mapped = 0; 3140 3141 return 0; 3142 } 3143 3144 /* 3145 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 3146 * This option will get or set the maximum size to put in any outgoing 3147 * SCTP DATA chunk. If a message is larger than this size it will be 3148 * fragmented by SCTP into the specified size. Note that the underlying 3149 * SCTP implementation may fragment into smaller sized chunks when the 3150 * PMTU of the underlying association is smaller than the value set by 3151 * the user. The default value for this option is '0' which indicates 3152 * the user is NOT limiting fragmentation and only the PMTU will effect 3153 * SCTP's choice of DATA chunk size. Note also that values set larger 3154 * than the maximum size of an IP datagram will effectively let SCTP 3155 * control fragmentation (i.e. the same as setting this option to 0). 
3156 * 3157 * The following structure is used to access and modify this parameter: 3158 * 3159 * struct sctp_assoc_value { 3160 * sctp_assoc_t assoc_id; 3161 * uint32_t assoc_value; 3162 * }; 3163 * 3164 * assoc_id: This parameter is ignored for one-to-one style sockets. 3165 * For one-to-many style sockets this parameter indicates which 3166 * association the user is performing an action upon. Note that if 3167 * this field's value is zero then the endpoint's default value is 3168 * changed (affecting future associations only). 3169 * assoc_value: This parameter specifies the maximum size in bytes. 3170 */ 3171 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3172 { 3173 struct sctp_sock *sp = sctp_sk(sk); 3174 struct sctp_assoc_value params; 3175 struct sctp_association *asoc; 3176 int val; 3177 3178 if (optlen == sizeof(int)) { 3179 pr_warn_ratelimited(DEPRECATED 3180 "%s (pid %d) " 3181 "Use of int in maxseg socket option.\n" 3182 "Use struct sctp_assoc_value instead\n", 3183 current->comm, task_pid_nr(current)); 3184 if (copy_from_user(&val, optval, optlen)) 3185 return -EFAULT; 3186 params.assoc_id = 0; 3187 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3188 if (copy_from_user(&params, optval, optlen)) 3189 return -EFAULT; 3190 val = params.assoc_value; 3191 } else { 3192 return -EINVAL; 3193 } 3194 3195 if (val) { 3196 int min_len, max_len; 3197 3198 min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len; 3199 min_len -= sizeof(struct sctphdr) + 3200 sizeof(struct sctp_data_chunk); 3201 3202 max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk); 3203 3204 if (val < min_len || val > max_len) 3205 return -EINVAL; 3206 } 3207 3208 asoc = sctp_id2assoc(sk, params.assoc_id); 3209 if (asoc) { 3210 if (val == 0) { 3211 val = asoc->pathmtu - sp->pf->af->net_header_len; 3212 val -= sizeof(struct sctphdr) + 3213 sctp_datachk_len(&asoc->stream); 3214 } 3215 asoc->user_frag = val; 3216 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3217 } else { 3218 if (params.assoc_id && sctp_style(sk, UDP)) 3219 return -EINVAL; 3220 sp->user_frag = val; 3221 } 3222 3223 return 0; 3224 } 3225 3226 3227 /* 3228 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) 3229 * 3230 * Requests that the peer mark the enclosed address as the association 3231 * primary. The enclosed address must be one of the association's 3232 * locally bound addresses.
The following structure is used to make a 3233 * set primary request: 3234 */ 3235 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3236 unsigned int optlen) 3237 { 3238 struct net *net = sock_net(sk); 3239 struct sctp_sock *sp; 3240 struct sctp_association *asoc = NULL; 3241 struct sctp_setpeerprim prim; 3242 struct sctp_chunk *chunk; 3243 struct sctp_af *af; 3244 int err; 3245 3246 sp = sctp_sk(sk); 3247 3248 if (!net->sctp.addip_enable) 3249 return -EPERM; 3250 3251 if (optlen != sizeof(struct sctp_setpeerprim)) 3252 return -EINVAL; 3253 3254 if (copy_from_user(&prim, optval, optlen)) 3255 return -EFAULT; 3256 3257 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); 3258 if (!asoc) 3259 return -EINVAL; 3260 3261 if (!asoc->peer.asconf_capable) 3262 return -EPERM; 3263 3264 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) 3265 return -EPERM; 3266 3267 if (!sctp_state(asoc, ESTABLISHED)) 3268 return -ENOTCONN; 3269 3270 af = sctp_get_af_specific(prim.sspp_addr.ss_family); 3271 if (!af) 3272 return -EINVAL; 3273 3274 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) 3275 return -EADDRNOTAVAIL; 3276 3277 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 3278 return -EADDRNOTAVAIL; 3279 3280 /* Create an ASCONF chunk with SET_PRIMARY parameter */ 3281 chunk = sctp_make_asconf_set_prim(asoc, 3282 (union sctp_addr *)&prim.sspp_addr); 3283 if (!chunk) 3284 return -ENOMEM; 3285 3286 err = sctp_send_asconf(asoc, chunk); 3287 3288 pr_debug("%s: we set peer primary addr primitively\n", __func__); 3289 3290 return err; 3291 } 3292 3293 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 3294 unsigned int optlen) 3295 { 3296 struct sctp_setadaptation adaptation; 3297 3298 if (optlen != sizeof(struct sctp_setadaptation)) 3299 return -EINVAL; 3300 if (copy_from_user(&adaptation, optval, optlen)) 3301 return -EFAULT; 3302 3303 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; 3304 3305 return 0; 3306 } 3307 3308 /* 3309 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 3310 * 3311 * The context field in the sctp_sndrcvinfo structure is normally only 3312 * used when a failed message is retrieved holding the value that was 3313 * sent down on the actual send call. This option allows the setting of 3314 * a default context on an association basis that will be received on 3315 * reading messages from the peer. This is especially helpful in the 3316 * one-2-many model for an application to keep some reference to an 3317 * internal state machine that is processing messages on the 3318 * association. Note that the setting of this value only effects 3319 * received messages from the peer and does not effect the value that is 3320 * saved with outbound messages. 3321 */ 3322 static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 3323 unsigned int optlen) 3324 { 3325 struct sctp_assoc_value params; 3326 struct sctp_sock *sp; 3327 struct sctp_association *asoc; 3328 3329 if (optlen != sizeof(struct sctp_assoc_value)) 3330 return -EINVAL; 3331 if (copy_from_user(¶ms, optval, optlen)) 3332 return -EFAULT; 3333 3334 sp = sctp_sk(sk); 3335 3336 if (params.assoc_id != 0) { 3337 asoc = sctp_id2assoc(sk, params.assoc_id); 3338 if (!asoc) 3339 return -EINVAL; 3340 asoc->default_rcv_context = params.assoc_value; 3341 } else { 3342 sp->default_rcv_context = params.assoc_value; 3343 } 3344 3345 return 0; 3346 } 3347 3348 /* 3349 * 7.1.24. 
Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 3350 * 3351 * This options will at a minimum specify if the implementation is doing 3352 * fragmented interleave. Fragmented interleave, for a one to many 3353 * socket, is when subsequent calls to receive a message may return 3354 * parts of messages from different associations. Some implementations 3355 * may allow you to turn this value on or off. If so, when turned off, 3356 * no fragment interleave will occur (which will cause a head of line 3357 * blocking amongst multiple associations sharing the same one to many 3358 * socket). When this option is turned on, then each receive call may 3359 * come from a different association (thus the user must receive data 3360 * with the extended calls (e.g. sctp_recvmsg) to keep track of which 3361 * association each receive belongs to. 3362 * 3363 * This option takes a boolean value. A non-zero value indicates that 3364 * fragmented interleave is on. A value of zero indicates that 3365 * fragmented interleave is off. 3366 * 3367 * Note that it is important that an implementation that allows this 3368 * option to be turned on, have it off by default. Otherwise an unaware 3369 * application using the one to many model may become confused and act 3370 * incorrectly. 3371 */ 3372 static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3373 char __user *optval, 3374 unsigned int optlen) 3375 { 3376 int val; 3377 3378 if (optlen != sizeof(int)) 3379 return -EINVAL; 3380 if (get_user(val, (int __user *)optval)) 3381 return -EFAULT; 3382 3383 sctp_sk(sk)->frag_interleave = !!val; 3384 3385 if (!sctp_sk(sk)->frag_interleave) 3386 sctp_sk(sk)->strm_interleave = 0; 3387 3388 return 0; 3389 } 3390 3391 /* 3392 * 8.1.21. Set or Get the SCTP Partial Delivery Point 3393 * (SCTP_PARTIAL_DELIVERY_POINT) 3394 * 3395 * This option will set or get the SCTP partial delivery point. This 3396 * point is the size of a message where the partial delivery API will be 3397 * invoked to help free up rwnd space for the peer. Setting this to a 3398 * lower value will cause partial deliveries to happen more often. The 3399 * calls argument is an integer that sets or gets the partial delivery 3400 * point. Note also that the call will fail if the user attempts to set 3401 * this value larger than the socket receive buffer size. 3402 * 3403 * Note that any single message having a length smaller than or equal to 3404 * the SCTP partial delivery point will be delivered in one single read 3405 * call as long as the user provided buffer is large enough to hold the 3406 * message. 3407 */ 3408 static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3409 char __user *optval, 3410 unsigned int optlen) 3411 { 3412 u32 val; 3413 3414 if (optlen != sizeof(u32)) 3415 return -EINVAL; 3416 if (get_user(val, (int __user *)optval)) 3417 return -EFAULT; 3418 3419 /* Note: We double the receive buffer from what the user sets 3420 * it to be, also initial rwnd is based on rcvbuf/2. 3421 */ 3422 if (val > (sk->sk_rcvbuf >> 1)) 3423 return -EINVAL; 3424 3425 sctp_sk(sk)->pd_point = val; 3426 3427 return 0; /* is this the right error code? */ 3428 } 3429 3430 /* 3431 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) 3432 * 3433 * This option will allow a user to change the maximum burst of packets 3434 * that can be emitted by this association. Note that the default value 3435 * is 4, and some implementations may restrict this setting so that it 3436 * can only be lowered. 
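 *
 * Illustrative (non-normative) userspace use with the structure form; the
 * descriptor and association id are placeholders:
 *
 *   struct sctp_assoc_value av = {
 *           .assoc_id    = assoc_id,   // 0 = socket/endpoint default
 *           .assoc_value = 8,          // new maximum burst
 *   };
 *   setsockopt(fd, SOL_SCTP, SCTP_MAX_BURST, &av, sizeof(av));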
3437 * 3438 * NOTE: This text doesn't seem right. Do this on a socket basis with 3439 * future associations inheriting the socket value. 3440 */ 3441 static int sctp_setsockopt_maxburst(struct sock *sk, 3442 char __user *optval, 3443 unsigned int optlen) 3444 { 3445 struct sctp_assoc_value params; 3446 struct sctp_sock *sp; 3447 struct sctp_association *asoc; 3448 int val; 3449 int assoc_id = 0; 3450 3451 if (optlen == sizeof(int)) { 3452 pr_warn_ratelimited(DEPRECATED 3453 "%s (pid %d) " 3454 "Use of int in max_burst socket option deprecated.\n" 3455 "Use struct sctp_assoc_value instead\n", 3456 current->comm, task_pid_nr(current)); 3457 if (copy_from_user(&val, optval, optlen)) 3458 return -EFAULT; 3459 } else if (optlen == sizeof(struct sctp_assoc_value)) { 3460 if (copy_from_user(¶ms, optval, optlen)) 3461 return -EFAULT; 3462 val = params.assoc_value; 3463 assoc_id = params.assoc_id; 3464 } else 3465 return -EINVAL; 3466 3467 sp = sctp_sk(sk); 3468 3469 if (assoc_id != 0) { 3470 asoc = sctp_id2assoc(sk, assoc_id); 3471 if (!asoc) 3472 return -EINVAL; 3473 asoc->max_burst = val; 3474 } else 3475 sp->max_burst = val; 3476 3477 return 0; 3478 } 3479 3480 /* 3481 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) 3482 * 3483 * This set option adds a chunk type that the user is requesting to be 3484 * received only in an authenticated way. Changes to the list of chunks 3485 * will only effect future associations on the socket. 3486 */ 3487 static int sctp_setsockopt_auth_chunk(struct sock *sk, 3488 char __user *optval, 3489 unsigned int optlen) 3490 { 3491 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3492 struct sctp_authchunk val; 3493 3494 if (!ep->auth_enable) 3495 return -EACCES; 3496 3497 if (optlen != sizeof(struct sctp_authchunk)) 3498 return -EINVAL; 3499 if (copy_from_user(&val, optval, optlen)) 3500 return -EFAULT; 3501 3502 switch (val.sauth_chunk) { 3503 case SCTP_CID_INIT: 3504 case SCTP_CID_INIT_ACK: 3505 case SCTP_CID_SHUTDOWN_COMPLETE: 3506 case SCTP_CID_AUTH: 3507 return -EINVAL; 3508 } 3509 3510 /* add this chunk id to the endpoint */ 3511 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); 3512 } 3513 3514 /* 3515 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) 3516 * 3517 * This option gets or sets the list of HMAC algorithms that the local 3518 * endpoint requires the peer to use. 3519 */ 3520 static int sctp_setsockopt_hmac_ident(struct sock *sk, 3521 char __user *optval, 3522 unsigned int optlen) 3523 { 3524 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3525 struct sctp_hmacalgo *hmacs; 3526 u32 idents; 3527 int err; 3528 3529 if (!ep->auth_enable) 3530 return -EACCES; 3531 3532 if (optlen < sizeof(struct sctp_hmacalgo)) 3533 return -EINVAL; 3534 3535 hmacs = memdup_user(optval, optlen); 3536 if (IS_ERR(hmacs)) 3537 return PTR_ERR(hmacs); 3538 3539 idents = hmacs->shmac_num_idents; 3540 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3541 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3542 err = -EINVAL; 3543 goto out; 3544 } 3545 3546 err = sctp_auth_ep_set_hmacs(ep, hmacs); 3547 out: 3548 kfree(hmacs); 3549 return err; 3550 } 3551 3552 /* 3553 * 7.1.20. Set a shared key (SCTP_AUTH_KEY) 3554 * 3555 * This option will set a shared secret key which is used to build an 3556 * association shared key. 
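 *
 * Illustrative (non-normative) userspace sketch; the descriptor and key
 * bytes are placeholders. The variable-length key material is carried
 * immediately after the fixed header fields:
 *
 *   size_t klen = sizeof(struct sctp_authkey) + keylen;
 *   struct sctp_authkey *ak = calloc(1, klen);
 *   ak->sca_assoc_id  = 0;          // endpoint-wide key
 *   ak->sca_keynumber = 1;
 *   ak->sca_keylength = keylen;
 *   memcpy(ak->sca_key, key, keylen);
 *   setsockopt(fd, SOL_SCTP, SCTP_AUTH_KEY, ak, klen);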
3557 */ 3558 static int sctp_setsockopt_auth_key(struct sock *sk, 3559 char __user *optval, 3560 unsigned int optlen) 3561 { 3562 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3563 struct sctp_authkey *authkey; 3564 struct sctp_association *asoc; 3565 int ret; 3566 3567 if (!ep->auth_enable) 3568 return -EACCES; 3569 3570 if (optlen <= sizeof(struct sctp_authkey)) 3571 return -EINVAL; 3572 3573 authkey = memdup_user(optval, optlen); 3574 if (IS_ERR(authkey)) 3575 return PTR_ERR(authkey); 3576 3577 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3578 ret = -EINVAL; 3579 goto out; 3580 } 3581 3582 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); 3583 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { 3584 ret = -EINVAL; 3585 goto out; 3586 } 3587 3588 ret = sctp_auth_set_key(ep, asoc, authkey); 3589 out: 3590 kzfree(authkey); 3591 return ret; 3592 } 3593 3594 /* 3595 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) 3596 * 3597 * This option will get or set the active shared key to be used to build 3598 * the association shared key. 3599 */ 3600 static int sctp_setsockopt_active_key(struct sock *sk, 3601 char __user *optval, 3602 unsigned int optlen) 3603 { 3604 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3605 struct sctp_authkeyid val; 3606 struct sctp_association *asoc; 3607 3608 if (!ep->auth_enable) 3609 return -EACCES; 3610 3611 if (optlen != sizeof(struct sctp_authkeyid)) 3612 return -EINVAL; 3613 if (copy_from_user(&val, optval, optlen)) 3614 return -EFAULT; 3615 3616 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3617 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3618 return -EINVAL; 3619 3620 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3621 } 3622 3623 /* 3624 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) 3625 * 3626 * This set option will delete a shared secret key from use. 3627 */ 3628 static int sctp_setsockopt_del_key(struct sock *sk, 3629 char __user *optval, 3630 unsigned int optlen) 3631 { 3632 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 3633 struct sctp_authkeyid val; 3634 struct sctp_association *asoc; 3635 3636 if (!ep->auth_enable) 3637 return -EACCES; 3638 3639 if (optlen != sizeof(struct sctp_authkeyid)) 3640 return -EINVAL; 3641 if (copy_from_user(&val, optval, optlen)) 3642 return -EFAULT; 3643 3644 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 3645 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 3646 return -EINVAL; 3647 3648 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3649 3650 } 3651 3652 /* 3653 * 8.1.23 SCTP_AUTO_ASCONF 3654 * 3655 * This option will enable or disable the use of the automatic generation of 3656 * ASCONF chunks to add and delete addresses to an existing association. Note 3657 * that this option has two caveats namely: a) it only affects sockets that 3658 * are bound to all addresses available to the SCTP stack, and b) the system 3659 * administrator may have an overriding control that turns the ASCONF feature 3660 * off no matter what setting the socket option may have. 3661 * This option expects an integer boolean flag, where a non-zero value turns on 3662 * the option, and a zero value turns off the option. 3663 * Note. 
In this implementation, socket operation overrides default parameter 3664 * being set by sysctl as well as FreeBSD implementation 3665 */ 3666 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, 3667 unsigned int optlen) 3668 { 3669 int val; 3670 struct sctp_sock *sp = sctp_sk(sk); 3671 3672 if (optlen < sizeof(int)) 3673 return -EINVAL; 3674 if (get_user(val, (int __user *)optval)) 3675 return -EFAULT; 3676 if (!sctp_is_ep_boundall(sk) && val) 3677 return -EINVAL; 3678 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) 3679 return 0; 3680 3681 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3682 if (val == 0 && sp->do_auto_asconf) { 3683 list_del(&sp->auto_asconf_list); 3684 sp->do_auto_asconf = 0; 3685 } else if (val && !sp->do_auto_asconf) { 3686 list_add_tail(&sp->auto_asconf_list, 3687 &sock_net(sk)->sctp.auto_asconf_splist); 3688 sp->do_auto_asconf = 1; 3689 } 3690 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); 3691 return 0; 3692 } 3693 3694 /* 3695 * SCTP_PEER_ADDR_THLDS 3696 * 3697 * This option allows us to alter the partially failed threshold for one or all 3698 * transports in an association. See Section 6.1 of: 3699 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 3700 */ 3701 static int sctp_setsockopt_paddr_thresholds(struct sock *sk, 3702 char __user *optval, 3703 unsigned int optlen) 3704 { 3705 struct sctp_paddrthlds val; 3706 struct sctp_transport *trans; 3707 struct sctp_association *asoc; 3708 3709 if (optlen < sizeof(struct sctp_paddrthlds)) 3710 return -EINVAL; 3711 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, 3712 sizeof(struct sctp_paddrthlds))) 3713 return -EFAULT; 3714 3715 3716 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 3717 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 3718 if (!asoc) 3719 return -ENOENT; 3720 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 3721 transports) { 3722 if (val.spt_pathmaxrxt) 3723 trans->pathmaxrxt = val.spt_pathmaxrxt; 3724 trans->pf_retrans = val.spt_pathpfthld; 3725 } 3726 3727 if (val.spt_pathmaxrxt) 3728 asoc->pathmaxrxt = val.spt_pathmaxrxt; 3729 asoc->pf_retrans = val.spt_pathpfthld; 3730 } else { 3731 trans = sctp_addr_id2transport(sk, &val.spt_address, 3732 val.spt_assoc_id); 3733 if (!trans) 3734 return -ENOENT; 3735 3736 if (val.spt_pathmaxrxt) 3737 trans->pathmaxrxt = val.spt_pathmaxrxt; 3738 trans->pf_retrans = val.spt_pathpfthld; 3739 } 3740 3741 return 0; 3742 } 3743 3744 static int sctp_setsockopt_recvrcvinfo(struct sock *sk, 3745 char __user *optval, 3746 unsigned int optlen) 3747 { 3748 int val; 3749 3750 if (optlen < sizeof(int)) 3751 return -EINVAL; 3752 if (get_user(val, (int __user *) optval)) 3753 return -EFAULT; 3754 3755 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; 3756 3757 return 0; 3758 } 3759 3760 static int sctp_setsockopt_recvnxtinfo(struct sock *sk, 3761 char __user *optval, 3762 unsigned int optlen) 3763 { 3764 int val; 3765 3766 if (optlen < sizeof(int)) 3767 return -EINVAL; 3768 if (get_user(val, (int __user *) optval)) 3769 return -EFAULT; 3770 3771 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 
0 : 1; 3772 3773 return 0; 3774 } 3775 3776 static int sctp_setsockopt_pr_supported(struct sock *sk, 3777 char __user *optval, 3778 unsigned int optlen) 3779 { 3780 struct sctp_assoc_value params; 3781 struct sctp_association *asoc; 3782 int retval = -EINVAL; 3783 3784 if (optlen != sizeof(params)) 3785 goto out; 3786 3787 if (copy_from_user(¶ms, optval, optlen)) { 3788 retval = -EFAULT; 3789 goto out; 3790 } 3791 3792 asoc = sctp_id2assoc(sk, params.assoc_id); 3793 if (asoc) { 3794 asoc->prsctp_enable = !!params.assoc_value; 3795 } else if (!params.assoc_id) { 3796 struct sctp_sock *sp = sctp_sk(sk); 3797 3798 sp->ep->prsctp_enable = !!params.assoc_value; 3799 } else { 3800 goto out; 3801 } 3802 3803 retval = 0; 3804 3805 out: 3806 return retval; 3807 } 3808 3809 static int sctp_setsockopt_default_prinfo(struct sock *sk, 3810 char __user *optval, 3811 unsigned int optlen) 3812 { 3813 struct sctp_default_prinfo info; 3814 struct sctp_association *asoc; 3815 int retval = -EINVAL; 3816 3817 if (optlen != sizeof(info)) 3818 goto out; 3819 3820 if (copy_from_user(&info, optval, sizeof(info))) { 3821 retval = -EFAULT; 3822 goto out; 3823 } 3824 3825 if (info.pr_policy & ~SCTP_PR_SCTP_MASK) 3826 goto out; 3827 3828 if (info.pr_policy == SCTP_PR_SCTP_NONE) 3829 info.pr_value = 0; 3830 3831 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 3832 if (asoc) { 3833 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); 3834 asoc->default_timetolive = info.pr_value; 3835 } else if (!info.pr_assoc_id) { 3836 struct sctp_sock *sp = sctp_sk(sk); 3837 3838 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); 3839 sp->default_timetolive = info.pr_value; 3840 } else { 3841 goto out; 3842 } 3843 3844 retval = 0; 3845 3846 out: 3847 return retval; 3848 } 3849 3850 static int sctp_setsockopt_reconfig_supported(struct sock *sk, 3851 char __user *optval, 3852 unsigned int optlen) 3853 { 3854 struct sctp_assoc_value params; 3855 struct sctp_association *asoc; 3856 int retval = -EINVAL; 3857 3858 if (optlen != sizeof(params)) 3859 goto out; 3860 3861 if (copy_from_user(¶ms, optval, optlen)) { 3862 retval = -EFAULT; 3863 goto out; 3864 } 3865 3866 asoc = sctp_id2assoc(sk, params.assoc_id); 3867 if (asoc) { 3868 asoc->reconf_enable = !!params.assoc_value; 3869 } else if (!params.assoc_id) { 3870 struct sctp_sock *sp = sctp_sk(sk); 3871 3872 sp->ep->reconf_enable = !!params.assoc_value; 3873 } else { 3874 goto out; 3875 } 3876 3877 retval = 0; 3878 3879 out: 3880 return retval; 3881 } 3882 3883 static int sctp_setsockopt_enable_strreset(struct sock *sk, 3884 char __user *optval, 3885 unsigned int optlen) 3886 { 3887 struct sctp_assoc_value params; 3888 struct sctp_association *asoc; 3889 int retval = -EINVAL; 3890 3891 if (optlen != sizeof(params)) 3892 goto out; 3893 3894 if (copy_from_user(¶ms, optval, optlen)) { 3895 retval = -EFAULT; 3896 goto out; 3897 } 3898 3899 if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) 3900 goto out; 3901 3902 asoc = sctp_id2assoc(sk, params.assoc_id); 3903 if (asoc) { 3904 asoc->strreset_enable = params.assoc_value; 3905 } else if (!params.assoc_id) { 3906 struct sctp_sock *sp = sctp_sk(sk); 3907 3908 sp->ep->strreset_enable = params.assoc_value; 3909 } else { 3910 goto out; 3911 } 3912 3913 retval = 0; 3914 3915 out: 3916 return retval; 3917 } 3918 3919 static int sctp_setsockopt_reset_streams(struct sock *sk, 3920 char __user *optval, 3921 unsigned int optlen) 3922 { 3923 struct sctp_reset_streams *params; 3924 struct sctp_association *asoc; 3925 int retval = -EINVAL; 3926 3927 
if (optlen < sizeof(*params)) 3928 return -EINVAL; 3929 3930 params = memdup_user(optval, optlen); 3931 if (IS_ERR(params)) 3932 return PTR_ERR(params); 3933 3934 if (params->srs_number_streams * sizeof(__u16) > 3935 optlen - sizeof(*params)) 3936 goto out; 3937 3938 asoc = sctp_id2assoc(sk, params->srs_assoc_id); 3939 if (!asoc) 3940 goto out; 3941 3942 retval = sctp_send_reset_streams(asoc, params); 3943 3944 out: 3945 kfree(params); 3946 return retval; 3947 } 3948 3949 static int sctp_setsockopt_reset_assoc(struct sock *sk, 3950 char __user *optval, 3951 unsigned int optlen) 3952 { 3953 struct sctp_association *asoc; 3954 sctp_assoc_t associd; 3955 int retval = -EINVAL; 3956 3957 if (optlen != sizeof(associd)) 3958 goto out; 3959 3960 if (copy_from_user(&associd, optval, optlen)) { 3961 retval = -EFAULT; 3962 goto out; 3963 } 3964 3965 asoc = sctp_id2assoc(sk, associd); 3966 if (!asoc) 3967 goto out; 3968 3969 retval = sctp_send_reset_assoc(asoc); 3970 3971 out: 3972 return retval; 3973 } 3974 3975 static int sctp_setsockopt_add_streams(struct sock *sk, 3976 char __user *optval, 3977 unsigned int optlen) 3978 { 3979 struct sctp_association *asoc; 3980 struct sctp_add_streams params; 3981 int retval = -EINVAL; 3982 3983 if (optlen != sizeof(params)) 3984 goto out; 3985 3986 if (copy_from_user(¶ms, optval, optlen)) { 3987 retval = -EFAULT; 3988 goto out; 3989 } 3990 3991 asoc = sctp_id2assoc(sk, params.sas_assoc_id); 3992 if (!asoc) 3993 goto out; 3994 3995 retval = sctp_send_add_streams(asoc, ¶ms); 3996 3997 out: 3998 return retval; 3999 } 4000 4001 static int sctp_setsockopt_scheduler(struct sock *sk, 4002 char __user *optval, 4003 unsigned int optlen) 4004 { 4005 struct sctp_association *asoc; 4006 struct sctp_assoc_value params; 4007 int retval = -EINVAL; 4008 4009 if (optlen < sizeof(params)) 4010 goto out; 4011 4012 optlen = sizeof(params); 4013 if (copy_from_user(¶ms, optval, optlen)) { 4014 retval = -EFAULT; 4015 goto out; 4016 } 4017 4018 if (params.assoc_value > SCTP_SS_MAX) 4019 goto out; 4020 4021 asoc = sctp_id2assoc(sk, params.assoc_id); 4022 if (!asoc) 4023 goto out; 4024 4025 retval = sctp_sched_set_sched(asoc, params.assoc_value); 4026 4027 out: 4028 return retval; 4029 } 4030 4031 static int sctp_setsockopt_scheduler_value(struct sock *sk, 4032 char __user *optval, 4033 unsigned int optlen) 4034 { 4035 struct sctp_association *asoc; 4036 struct sctp_stream_value params; 4037 int retval = -EINVAL; 4038 4039 if (optlen < sizeof(params)) 4040 goto out; 4041 4042 optlen = sizeof(params); 4043 if (copy_from_user(¶ms, optval, optlen)) { 4044 retval = -EFAULT; 4045 goto out; 4046 } 4047 4048 asoc = sctp_id2assoc(sk, params.assoc_id); 4049 if (!asoc) 4050 goto out; 4051 4052 retval = sctp_sched_set_value(asoc, params.stream_id, 4053 params.stream_value, GFP_KERNEL); 4054 4055 out: 4056 return retval; 4057 } 4058 4059 static int sctp_setsockopt_interleaving_supported(struct sock *sk, 4060 char __user *optval, 4061 unsigned int optlen) 4062 { 4063 struct sctp_sock *sp = sctp_sk(sk); 4064 struct net *net = sock_net(sk); 4065 struct sctp_assoc_value params; 4066 int retval = -EINVAL; 4067 4068 if (optlen < sizeof(params)) 4069 goto out; 4070 4071 optlen = sizeof(params); 4072 if (copy_from_user(¶ms, optval, optlen)) { 4073 retval = -EFAULT; 4074 goto out; 4075 } 4076 4077 if (params.assoc_id) 4078 goto out; 4079 4080 if (!net->sctp.intl_enable || !sp->frag_interleave) { 4081 retval = -EPERM; 4082 goto out; 4083 } 4084 4085 sp->strm_interleave = !!params.assoc_value; 4086 4087 retval 
= 0; 4088 4089 out: 4090 return retval; 4091 } 4092 4093 /* API 6.2 setsockopt(), getsockopt() 4094 * 4095 * Applications use setsockopt() and getsockopt() to set or retrieve 4096 * socket options. Socket options are used to change the default 4097 * behavior of sockets calls. They are described in Section 7. 4098 * 4099 * The syntax is: 4100 * 4101 * ret = getsockopt(int sd, int level, int optname, void __user *optval, 4102 * int __user *optlen); 4103 * ret = setsockopt(int sd, int level, int optname, const void __user *optval, 4104 * int optlen); 4105 * 4106 * sd - the socket descript. 4107 * level - set to IPPROTO_SCTP for all SCTP options. 4108 * optname - the option name. 4109 * optval - the buffer to store the value of the option. 4110 * optlen - the size of the buffer. 4111 */ 4112 static int sctp_setsockopt(struct sock *sk, int level, int optname, 4113 char __user *optval, unsigned int optlen) 4114 { 4115 int retval = 0; 4116 4117 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 4118 4119 /* I can hardly begin to describe how wrong this is. This is 4120 * so broken as to be worse than useless. The API draft 4121 * REALLY is NOT helpful here... I am not convinced that the 4122 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP 4123 * are at all well-founded. 4124 */ 4125 if (level != SOL_SCTP) { 4126 struct sctp_af *af = sctp_sk(sk)->pf->af; 4127 retval = af->setsockopt(sk, level, optname, optval, optlen); 4128 goto out_nounlock; 4129 } 4130 4131 lock_sock(sk); 4132 4133 switch (optname) { 4134 case SCTP_SOCKOPT_BINDX_ADD: 4135 /* 'optlen' is the size of the addresses buffer. */ 4136 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 4137 optlen, SCTP_BINDX_ADD_ADDR); 4138 break; 4139 4140 case SCTP_SOCKOPT_BINDX_REM: 4141 /* 'optlen' is the size of the addresses buffer. */ 4142 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, 4143 optlen, SCTP_BINDX_REM_ADDR); 4144 break; 4145 4146 case SCTP_SOCKOPT_CONNECTX_OLD: 4147 /* 'optlen' is the size of the addresses buffer. */ 4148 retval = sctp_setsockopt_connectx_old(sk, 4149 (struct sockaddr __user *)optval, 4150 optlen); 4151 break; 4152 4153 case SCTP_SOCKOPT_CONNECTX: 4154 /* 'optlen' is the size of the addresses buffer. 
*/ 4155 retval = sctp_setsockopt_connectx(sk, 4156 (struct sockaddr __user *)optval, 4157 optlen); 4158 break; 4159 4160 case SCTP_DISABLE_FRAGMENTS: 4161 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); 4162 break; 4163 4164 case SCTP_EVENTS: 4165 retval = sctp_setsockopt_events(sk, optval, optlen); 4166 break; 4167 4168 case SCTP_AUTOCLOSE: 4169 retval = sctp_setsockopt_autoclose(sk, optval, optlen); 4170 break; 4171 4172 case SCTP_PEER_ADDR_PARAMS: 4173 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 4174 break; 4175 4176 case SCTP_DELAYED_SACK: 4177 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); 4178 break; 4179 case SCTP_PARTIAL_DELIVERY_POINT: 4180 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 4181 break; 4182 4183 case SCTP_INITMSG: 4184 retval = sctp_setsockopt_initmsg(sk, optval, optlen); 4185 break; 4186 case SCTP_DEFAULT_SEND_PARAM: 4187 retval = sctp_setsockopt_default_send_param(sk, optval, 4188 optlen); 4189 break; 4190 case SCTP_DEFAULT_SNDINFO: 4191 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); 4192 break; 4193 case SCTP_PRIMARY_ADDR: 4194 retval = sctp_setsockopt_primary_addr(sk, optval, optlen); 4195 break; 4196 case SCTP_SET_PEER_PRIMARY_ADDR: 4197 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); 4198 break; 4199 case SCTP_NODELAY: 4200 retval = sctp_setsockopt_nodelay(sk, optval, optlen); 4201 break; 4202 case SCTP_RTOINFO: 4203 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); 4204 break; 4205 case SCTP_ASSOCINFO: 4206 retval = sctp_setsockopt_associnfo(sk, optval, optlen); 4207 break; 4208 case SCTP_I_WANT_MAPPED_V4_ADDR: 4209 retval = sctp_setsockopt_mappedv4(sk, optval, optlen); 4210 break; 4211 case SCTP_MAXSEG: 4212 retval = sctp_setsockopt_maxseg(sk, optval, optlen); 4213 break; 4214 case SCTP_ADAPTATION_LAYER: 4215 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); 4216 break; 4217 case SCTP_CONTEXT: 4218 retval = sctp_setsockopt_context(sk, optval, optlen); 4219 break; 4220 case SCTP_FRAGMENT_INTERLEAVE: 4221 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); 4222 break; 4223 case SCTP_MAX_BURST: 4224 retval = sctp_setsockopt_maxburst(sk, optval, optlen); 4225 break; 4226 case SCTP_AUTH_CHUNK: 4227 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); 4228 break; 4229 case SCTP_HMAC_IDENT: 4230 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); 4231 break; 4232 case SCTP_AUTH_KEY: 4233 retval = sctp_setsockopt_auth_key(sk, optval, optlen); 4234 break; 4235 case SCTP_AUTH_ACTIVE_KEY: 4236 retval = sctp_setsockopt_active_key(sk, optval, optlen); 4237 break; 4238 case SCTP_AUTH_DELETE_KEY: 4239 retval = sctp_setsockopt_del_key(sk, optval, optlen); 4240 break; 4241 case SCTP_AUTO_ASCONF: 4242 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); 4243 break; 4244 case SCTP_PEER_ADDR_THLDS: 4245 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); 4246 break; 4247 case SCTP_RECVRCVINFO: 4248 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); 4249 break; 4250 case SCTP_RECVNXTINFO: 4251 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); 4252 break; 4253 case SCTP_PR_SUPPORTED: 4254 retval = sctp_setsockopt_pr_supported(sk, optval, optlen); 4255 break; 4256 case SCTP_DEFAULT_PRINFO: 4257 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); 4258 break; 4259 case SCTP_RECONFIG_SUPPORTED: 4260 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); 4261 break; 4262 case SCTP_ENABLE_STREAM_RESET: 
4263 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); 4264 break; 4265 case SCTP_RESET_STREAMS: 4266 retval = sctp_setsockopt_reset_streams(sk, optval, optlen); 4267 break; 4268 case SCTP_RESET_ASSOC: 4269 retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); 4270 break; 4271 case SCTP_ADD_STREAMS: 4272 retval = sctp_setsockopt_add_streams(sk, optval, optlen); 4273 break; 4274 case SCTP_STREAM_SCHEDULER: 4275 retval = sctp_setsockopt_scheduler(sk, optval, optlen); 4276 break; 4277 case SCTP_STREAM_SCHEDULER_VALUE: 4278 retval = sctp_setsockopt_scheduler_value(sk, optval, optlen); 4279 break; 4280 case SCTP_INTERLEAVING_SUPPORTED: 4281 retval = sctp_setsockopt_interleaving_supported(sk, optval, 4282 optlen); 4283 break; 4284 default: 4285 retval = -ENOPROTOOPT; 4286 break; 4287 } 4288 4289 release_sock(sk); 4290 4291 out_nounlock: 4292 return retval; 4293 } 4294 4295 /* API 3.1.6 connect() - UDP Style Syntax 4296 * 4297 * An application may use the connect() call in the UDP model to initiate an 4298 * association without sending data. 4299 * 4300 * The syntax is: 4301 * 4302 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); 4303 * 4304 * sd: the socket descriptor to have a new association added to. 4305 * 4306 * nam: the address structure (either struct sockaddr_in or struct 4307 * sockaddr_in6 defined in RFC2553 [7]). 4308 * 4309 * len: the size of the address. 4310 */ 4311 static int sctp_connect(struct sock *sk, struct sockaddr *addr, 4312 int addr_len) 4313 { 4314 int err = 0; 4315 struct sctp_af *af; 4316 4317 lock_sock(sk); 4318 4319 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 4320 addr, addr_len); 4321 4322 /* Validate addr_len before calling common connect/connectx routine. */ 4323 af = sctp_get_af_specific(addr->sa_family); 4324 if (!af || addr_len < af->sockaddr_len) { 4325 err = -EINVAL; 4326 } else { 4327 /* Pass correct addr len to common routine (so it knows there 4328 * is only one address being passed. 4329 */ 4330 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 4331 } 4332 4333 release_sock(sk); 4334 return err; 4335 } 4336 4337 /* FIXME: Write comments. */ 4338 static int sctp_disconnect(struct sock *sk, int flags) 4339 { 4340 return -EOPNOTSUPP; /* STUB */ 4341 } 4342 4343 /* 4.1.4 accept() - TCP Style Syntax 4344 * 4345 * Applications use accept() call to remove an established SCTP 4346 * association from the accept queue of the endpoint. A new socket 4347 * descriptor will be returned from accept() to represent the newly 4348 * formed association. 4349 */ 4350 static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern) 4351 { 4352 struct sctp_sock *sp; 4353 struct sctp_endpoint *ep; 4354 struct sock *newsk = NULL; 4355 struct sctp_association *asoc; 4356 long timeo; 4357 int error = 0; 4358 4359 lock_sock(sk); 4360 4361 sp = sctp_sk(sk); 4362 ep = sp->ep; 4363 4364 if (!sctp_style(sk, TCP)) { 4365 error = -EOPNOTSUPP; 4366 goto out; 4367 } 4368 4369 if (!sctp_sstate(sk, LISTENING)) { 4370 error = -EINVAL; 4371 goto out; 4372 } 4373 4374 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 4375 4376 error = sctp_wait_for_accept(sk, timeo); 4377 if (error) 4378 goto out; 4379 4380 /* We treat the list of associations on the endpoint as the accept 4381 * queue and pick the first association on the list. 
4382 */ 4383 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 4384 4385 newsk = sp->pf->create_accept_sk(sk, asoc, kern); 4386 if (!newsk) { 4387 error = -ENOMEM; 4388 goto out; 4389 } 4390 4391 /* Populate the fields of the newsk from the oldsk and migrate the 4392 * asoc to the newsk. 4393 */ 4394 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); 4395 4396 out: 4397 release_sock(sk); 4398 *err = error; 4399 return newsk; 4400 } 4401 4402 /* The SCTP ioctl handler. */ 4403 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) 4404 { 4405 int rc = -ENOTCONN; 4406 4407 lock_sock(sk); 4408 4409 /* 4410 * SEQPACKET-style sockets in LISTENING state are valid, for 4411 * SCTP, so only discard TCP-style sockets in LISTENING state. 4412 */ 4413 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 4414 goto out; 4415 4416 switch (cmd) { 4417 case SIOCINQ: { 4418 struct sk_buff *skb; 4419 unsigned int amount = 0; 4420 4421 skb = skb_peek(&sk->sk_receive_queue); 4422 if (skb != NULL) { 4423 /* 4424 * We will only return the amount of this packet since 4425 * that is all that will be read. 4426 */ 4427 amount = skb->len; 4428 } 4429 rc = put_user(amount, (int __user *)arg); 4430 break; 4431 } 4432 default: 4433 rc = -ENOIOCTLCMD; 4434 break; 4435 } 4436 out: 4437 release_sock(sk); 4438 return rc; 4439 } 4440 4441 /* This is the function which gets called during socket creation to 4442 * initialized the SCTP-specific portion of the sock. 4443 * The sock structure should already be zero-filled memory. 4444 */ 4445 static int sctp_init_sock(struct sock *sk) 4446 { 4447 struct net *net = sock_net(sk); 4448 struct sctp_sock *sp; 4449 4450 pr_debug("%s: sk:%p\n", __func__, sk); 4451 4452 sp = sctp_sk(sk); 4453 4454 /* Initialize the SCTP per socket area. */ 4455 switch (sk->sk_type) { 4456 case SOCK_SEQPACKET: 4457 sp->type = SCTP_SOCKET_UDP; 4458 break; 4459 case SOCK_STREAM: 4460 sp->type = SCTP_SOCKET_TCP; 4461 break; 4462 default: 4463 return -ESOCKTNOSUPPORT; 4464 } 4465 4466 sk->sk_gso_type = SKB_GSO_SCTP; 4467 4468 /* Initialize default send parameters. These parameters can be 4469 * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 4470 */ 4471 sp->default_stream = 0; 4472 sp->default_ppid = 0; 4473 sp->default_flags = 0; 4474 sp->default_context = 0; 4475 sp->default_timetolive = 0; 4476 4477 sp->default_rcv_context = 0; 4478 sp->max_burst = net->sctp.max_burst; 4479 4480 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; 4481 4482 /* Initialize default setup parameters. These parameters 4483 * can be modified with the SCTP_INITMSG socket option or 4484 * overridden by the SCTP_INIT CMSG. 4485 */ 4486 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 4487 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 4488 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; 4489 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; 4490 4491 /* Initialize default RTO related parameters. These parameters can 4492 * be modified for with the SCTP_RTOINFO socket option. 4493 */ 4494 sp->rtoinfo.srto_initial = net->sctp.rto_initial; 4495 sp->rtoinfo.srto_max = net->sctp.rto_max; 4496 sp->rtoinfo.srto_min = net->sctp.rto_min; 4497 4498 /* Initialize default association related parameters. These parameters 4499 * can be modified with the SCTP_ASSOCINFO socket option. 
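	 * For example, a user-space application might adjust the default
	 * cookie life (sketch only; `sd` is an assumed SCTP socket and
	 * <netinet/sctp.h> is assumed to be included):
	 *
	 *	struct sctp_assocparams ap = { .sasoc_assoc_id = 0,
	 *				       .sasoc_cookie_life = 60000 };
	 *	setsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));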
4500 */ 4501 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; 4502 sp->assocparams.sasoc_number_peer_destinations = 0; 4503 sp->assocparams.sasoc_peer_rwnd = 0; 4504 sp->assocparams.sasoc_local_rwnd = 0; 4505 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; 4506 4507 /* Initialize default event subscriptions. By default, all the 4508 * options are off. 4509 */ 4510 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); 4511 4512 /* Default Peer Address Parameters. These defaults can 4513 * be modified via SCTP_PEER_ADDR_PARAMS 4514 */ 4515 sp->hbinterval = net->sctp.hb_interval; 4516 sp->pathmaxrxt = net->sctp.max_retrans_path; 4517 sp->pathmtu = 0; /* allow default discovery */ 4518 sp->sackdelay = net->sctp.sack_timeout; 4519 sp->sackfreq = 2; 4520 sp->param_flags = SPP_HB_ENABLE | 4521 SPP_PMTUD_ENABLE | 4522 SPP_SACKDELAY_ENABLE; 4523 4524 /* If enabled no SCTP message fragmentation will be performed. 4525 * Configure through SCTP_DISABLE_FRAGMENTS socket option. 4526 */ 4527 sp->disable_fragments = 0; 4528 4529 /* Enable Nagle algorithm by default. */ 4530 sp->nodelay = 0; 4531 4532 sp->recvrcvinfo = 0; 4533 sp->recvnxtinfo = 0; 4534 4535 /* Enable by default. */ 4536 sp->v4mapped = 1; 4537 4538 /* Auto-close idle associations after the configured 4539 * number of seconds. A value of 0 disables this 4540 * feature. Configure through the SCTP_AUTOCLOSE socket option, 4541 * for UDP-style sockets only. 4542 */ 4543 sp->autoclose = 0; 4544 4545 /* User specified fragmentation limit. */ 4546 sp->user_frag = 0; 4547 4548 sp->adaptation_ind = 0; 4549 4550 sp->pf = sctp_get_pf_specific(sk->sk_family); 4551 4552 /* Control variables for partial data delivery. */ 4553 atomic_set(&sp->pd_mode, 0); 4554 skb_queue_head_init(&sp->pd_lobby); 4555 sp->frag_interleave = 0; 4556 4557 /* Create a per socket endpoint structure. Even if we 4558 * change the data structure relationships, this may still 4559 * be useful for storing pre-connect address information. 4560 */ 4561 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); 4562 if (!sp->ep) 4563 return -ENOMEM; 4564 4565 sp->hmac = NULL; 4566 4567 sk->sk_destruct = sctp_destruct_sock; 4568 4569 SCTP_DBG_OBJCNT_INC(sock); 4570 4571 local_bh_disable(); 4572 sk_sockets_allocated_inc(sk); 4573 sock_prot_inuse_add(net, sk->sk_prot, 1); 4574 4575 /* Nothing can fail after this block, otherwise 4576 * sctp_destroy_sock() will be called without addr_wq_lock held 4577 */ 4578 if (net->sctp.default_auto_asconf) { 4579 spin_lock(&sock_net(sk)->sctp.addr_wq_lock); 4580 list_add_tail(&sp->auto_asconf_list, 4581 &net->sctp.auto_asconf_splist); 4582 sp->do_auto_asconf = 1; 4583 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); 4584 } else { 4585 sp->do_auto_asconf = 0; 4586 } 4587 4588 local_bh_enable(); 4589 4590 return 0; 4591 } 4592 4593 /* Cleanup any SCTP per socket resources. Must be called with 4594 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true 4595 */ 4596 static void sctp_destroy_sock(struct sock *sk) 4597 { 4598 struct sctp_sock *sp; 4599 4600 pr_debug("%s: sk:%p\n", __func__, sk); 4601 4602 /* Release our hold on the endpoint. */ 4603 sp = sctp_sk(sk); 4604 /* This could happen during socket init, thus we bail out 4605 * early, since the rest of the below is not setup either. 
4606 */ 4607 if (sp->ep == NULL) 4608 return; 4609 4610 if (sp->do_auto_asconf) { 4611 sp->do_auto_asconf = 0; 4612 list_del(&sp->auto_asconf_list); 4613 } 4614 sctp_endpoint_free(sp->ep); 4615 local_bh_disable(); 4616 sk_sockets_allocated_dec(sk); 4617 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4618 local_bh_enable(); 4619 } 4620 4621 /* Triggered when there are no references on the socket anymore */ 4622 static void sctp_destruct_sock(struct sock *sk) 4623 { 4624 struct sctp_sock *sp = sctp_sk(sk); 4625 4626 /* Free up the HMAC transform. */ 4627 crypto_free_shash(sp->hmac); 4628 4629 inet_sock_destruct(sk); 4630 } 4631 4632 /* API 4.1.7 shutdown() - TCP Style Syntax 4633 * int shutdown(int socket, int how); 4634 * 4635 * sd - the socket descriptor of the association to be closed. 4636 * how - Specifies the type of shutdown. The values are 4637 * as follows: 4638 * SHUT_RD 4639 * Disables further receive operations. No SCTP 4640 * protocol action is taken. 4641 * SHUT_WR 4642 * Disables further send operations, and initiates 4643 * the SCTP shutdown sequence. 4644 * SHUT_RDWR 4645 * Disables further send and receive operations 4646 * and initiates the SCTP shutdown sequence. 4647 */ 4648 static void sctp_shutdown(struct sock *sk, int how) 4649 { 4650 struct net *net = sock_net(sk); 4651 struct sctp_endpoint *ep; 4652 4653 if (!sctp_style(sk, TCP)) 4654 return; 4655 4656 ep = sctp_sk(sk)->ep; 4657 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { 4658 struct sctp_association *asoc; 4659 4660 inet_sk_set_state(sk, SCTP_SS_CLOSING); 4661 asoc = list_entry(ep->asocs.next, 4662 struct sctp_association, asocs); 4663 sctp_primitive_SHUTDOWN(net, asoc, NULL); 4664 } 4665 } 4666 4667 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, 4668 struct sctp_info *info) 4669 { 4670 struct sctp_transport *prim; 4671 struct list_head *pos; 4672 int mask; 4673 4674 memset(info, 0, sizeof(*info)); 4675 if (!asoc) { 4676 struct sctp_sock *sp = sctp_sk(sk); 4677 4678 info->sctpi_s_autoclose = sp->autoclose; 4679 info->sctpi_s_adaptation_ind = sp->adaptation_ind; 4680 info->sctpi_s_pd_point = sp->pd_point; 4681 info->sctpi_s_nodelay = sp->nodelay; 4682 info->sctpi_s_disable_fragments = sp->disable_fragments; 4683 info->sctpi_s_v4mapped = sp->v4mapped; 4684 info->sctpi_s_frag_interleave = sp->frag_interleave; 4685 info->sctpi_s_type = sp->type; 4686 4687 return 0; 4688 } 4689 4690 info->sctpi_tag = asoc->c.my_vtag; 4691 info->sctpi_state = asoc->state; 4692 info->sctpi_rwnd = asoc->a_rwnd; 4693 info->sctpi_unackdata = asoc->unack_data; 4694 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4695 info->sctpi_instrms = asoc->stream.incnt; 4696 info->sctpi_outstrms = asoc->stream.outcnt; 4697 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4698 info->sctpi_inqueue++; 4699 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4700 info->sctpi_outqueue++; 4701 info->sctpi_overall_error = asoc->overall_error_count; 4702 info->sctpi_max_burst = asoc->max_burst; 4703 info->sctpi_maxseg = asoc->frag_point; 4704 info->sctpi_peer_rwnd = asoc->peer.rwnd; 4705 info->sctpi_peer_tag = asoc->c.peer_vtag; 4706 4707 mask = asoc->peer.ecn_capable << 1; 4708 mask = (mask | asoc->peer.ipv4_address) << 1; 4709 mask = (mask | asoc->peer.ipv6_address) << 1; 4710 mask = (mask | asoc->peer.hostname_address) << 1; 4711 mask = (mask | asoc->peer.asconf_capable) << 1; 4712 mask = (mask | asoc->peer.prsctp_capable) << 1; 4713 mask = (mask | asoc->peer.auth_capable); 4714 
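	/* Resulting bit layout of sctpi_peer_capable, most significant bit
	 * first: ecn | ipv4 | ipv6 | hostname | asconf | prsctp | auth,
	 * i.e. the order in which the flags were shifted in above.
	 */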
info->sctpi_peer_capable = mask; 4715 mask = asoc->peer.sack_needed << 1; 4716 mask = (mask | asoc->peer.sack_generation) << 1; 4717 mask = (mask | asoc->peer.zero_window_announced); 4718 info->sctpi_peer_sack = mask; 4719 4720 info->sctpi_isacks = asoc->stats.isacks; 4721 info->sctpi_osacks = asoc->stats.osacks; 4722 info->sctpi_opackets = asoc->stats.opackets; 4723 info->sctpi_ipackets = asoc->stats.ipackets; 4724 info->sctpi_rtxchunks = asoc->stats.rtxchunks; 4725 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; 4726 info->sctpi_idupchunks = asoc->stats.idupchunks; 4727 info->sctpi_gapcnt = asoc->stats.gapcnt; 4728 info->sctpi_ouodchunks = asoc->stats.ouodchunks; 4729 info->sctpi_iuodchunks = asoc->stats.iuodchunks; 4730 info->sctpi_oodchunks = asoc->stats.oodchunks; 4731 info->sctpi_iodchunks = asoc->stats.iodchunks; 4732 info->sctpi_octrlchunks = asoc->stats.octrlchunks; 4733 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; 4734 4735 prim = asoc->peer.primary_path; 4736 memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr)); 4737 info->sctpi_p_state = prim->state; 4738 info->sctpi_p_cwnd = prim->cwnd; 4739 info->sctpi_p_srtt = prim->srtt; 4740 info->sctpi_p_rto = jiffies_to_msecs(prim->rto); 4741 info->sctpi_p_hbinterval = prim->hbinterval; 4742 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; 4743 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); 4744 info->sctpi_p_ssthresh = prim->ssthresh; 4745 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; 4746 info->sctpi_p_flight_size = prim->flight_size; 4747 info->sctpi_p_error = prim->error_count; 4748 4749 return 0; 4750 } 4751 EXPORT_SYMBOL_GPL(sctp_get_sctp_info); 4752 4753 /* use callback to avoid exporting the core structure */ 4754 void sctp_transport_walk_start(struct rhashtable_iter *iter) 4755 { 4756 rhltable_walk_enter(&sctp_transport_hashtable, iter); 4757 4758 rhashtable_walk_start(iter); 4759 } 4760 4761 void sctp_transport_walk_stop(struct rhashtable_iter *iter) 4762 { 4763 rhashtable_walk_stop(iter); 4764 rhashtable_walk_exit(iter); 4765 } 4766 4767 struct sctp_transport *sctp_transport_get_next(struct net *net, 4768 struct rhashtable_iter *iter) 4769 { 4770 struct sctp_transport *t; 4771 4772 t = rhashtable_walk_next(iter); 4773 for (; t; t = rhashtable_walk_next(iter)) { 4774 if (IS_ERR(t)) { 4775 if (PTR_ERR(t) == -EAGAIN) 4776 continue; 4777 break; 4778 } 4779 4780 if (net_eq(sock_net(t->asoc->base.sk), net) && 4781 t->asoc->peer.primary_path == t) 4782 break; 4783 } 4784 4785 return t; 4786 } 4787 4788 struct sctp_transport *sctp_transport_get_idx(struct net *net, 4789 struct rhashtable_iter *iter, 4790 int pos) 4791 { 4792 void *obj = SEQ_START_TOKEN; 4793 4794 while (pos && (obj = sctp_transport_get_next(net, iter)) && 4795 !IS_ERR(obj)) 4796 pos--; 4797 4798 return obj; 4799 } 4800 4801 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), 4802 void *p) { 4803 int err = 0; 4804 int hash = 0; 4805 struct sctp_ep_common *epb; 4806 struct sctp_hashbucket *head; 4807 4808 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4809 hash++, head++) { 4810 read_lock_bh(&head->lock); 4811 sctp_for_each_hentry(epb, &head->chain) { 4812 err = cb(sctp_ep(epb), p); 4813 if (err) 4814 break; 4815 } 4816 read_unlock_bh(&head->lock); 4817 } 4818 4819 return err; 4820 } 4821 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); 4822 4823 int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), 4824 struct net *net, 4825 const union sctp_addr *laddr, 4826 const union 
sctp_addr *paddr, void *p) 4827 { 4828 struct sctp_transport *transport; 4829 int err; 4830 4831 rcu_read_lock(); 4832 transport = sctp_addrs_lookup_transport(net, laddr, paddr); 4833 rcu_read_unlock(); 4834 if (!transport) 4835 return -ENOENT; 4836 4837 err = cb(transport, p); 4838 sctp_transport_put(transport); 4839 4840 return err; 4841 } 4842 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); 4843 4844 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), 4845 int (*cb_done)(struct sctp_transport *, void *), 4846 struct net *net, int *pos, void *p) { 4847 struct rhashtable_iter hti; 4848 struct sctp_transport *tsp; 4849 int ret = 0; 4850 4851 again: 4852 sctp_transport_walk_start(&hti); 4853 4854 tsp = sctp_transport_get_idx(net, &hti, *pos + 1); 4855 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { 4856 if (!sctp_transport_hold(tsp)) 4857 continue; 4858 ret = cb(tsp, p); 4859 if (ret) 4860 break; 4861 (*pos)++; 4862 sctp_transport_put(tsp); 4863 } 4864 sctp_transport_walk_stop(&hti); 4865 4866 if (ret) { 4867 if (cb_done && !cb_done(tsp, p)) { 4868 (*pos)++; 4869 sctp_transport_put(tsp); 4870 goto again; 4871 } 4872 sctp_transport_put(tsp); 4873 } 4874 4875 return ret; 4876 } 4877 EXPORT_SYMBOL_GPL(sctp_for_each_transport); 4878 4879 /* 7.2.1 Association Status (SCTP_STATUS) 4880 4881 * Applications can retrieve current status information about an 4882 * association, including association state, peer receiver window size, 4883 * number of unacked data chunks, and number of data chunks pending 4884 * receipt. This information is read-only. 4885 */ 4886 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, 4887 char __user *optval, 4888 int __user *optlen) 4889 { 4890 struct sctp_status status; 4891 struct sctp_association *asoc = NULL; 4892 struct sctp_transport *transport; 4893 sctp_assoc_t associd; 4894 int retval = 0; 4895 4896 if (len < sizeof(status)) { 4897 retval = -EINVAL; 4898 goto out; 4899 } 4900 4901 len = sizeof(status); 4902 if (copy_from_user(&status, optval, len)) { 4903 retval = -EFAULT; 4904 goto out; 4905 } 4906 4907 associd = status.sstat_assoc_id; 4908 asoc = sctp_id2assoc(sk, associd); 4909 if (!asoc) { 4910 retval = -EINVAL; 4911 goto out; 4912 } 4913 4914 transport = asoc->peer.primary_path; 4915 4916 status.sstat_assoc_id = sctp_assoc2id(asoc); 4917 status.sstat_state = sctp_assoc_to_state(asoc); 4918 status.sstat_rwnd = asoc->peer.rwnd; 4919 status.sstat_unackdata = asoc->unack_data; 4920 4921 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4922 status.sstat_instrms = asoc->stream.incnt; 4923 status.sstat_outstrms = asoc->stream.outcnt; 4924 status.sstat_fragmentation_point = asoc->frag_point; 4925 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4926 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4927 transport->af_specific->sockaddr_len); 4928 /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ 4929 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), 4930 (union sctp_addr *)&status.sstat_primary.spinfo_address); 4931 status.sstat_primary.spinfo_state = transport->state; 4932 status.sstat_primary.spinfo_cwnd = transport->cwnd; 4933 status.sstat_primary.spinfo_srtt = transport->srtt; 4934 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); 4935 status.sstat_primary.spinfo_mtu = transport->pathmtu; 4936 4937 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) 4938 status.sstat_primary.spinfo_state = SCTP_ACTIVE; 4939 4940 if (put_user(len, optlen)) { 4941 retval = -EFAULT; 4942 goto out; 4943 } 4944 4945 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", 4946 __func__, len, status.sstat_state, status.sstat_rwnd, 4947 status.sstat_assoc_id); 4948 4949 if (copy_to_user(optval, &status, len)) { 4950 retval = -EFAULT; 4951 goto out; 4952 } 4953 4954 out: 4955 return retval; 4956 } 4957 4958 4959 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) 4960 * 4961 * Applications can retrieve information about a specific peer address 4962 * of an association, including its reachability state, congestion 4963 * window, and retransmission timer values. This information is 4964 * read-only. 4965 */ 4966 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, 4967 char __user *optval, 4968 int __user *optlen) 4969 { 4970 struct sctp_paddrinfo pinfo; 4971 struct sctp_transport *transport; 4972 int retval = 0; 4973 4974 if (len < sizeof(pinfo)) { 4975 retval = -EINVAL; 4976 goto out; 4977 } 4978 4979 len = sizeof(pinfo); 4980 if (copy_from_user(&pinfo, optval, len)) { 4981 retval = -EFAULT; 4982 goto out; 4983 } 4984 4985 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, 4986 pinfo.spinfo_assoc_id); 4987 if (!transport) 4988 return -EINVAL; 4989 4990 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4991 pinfo.spinfo_state = transport->state; 4992 pinfo.spinfo_cwnd = transport->cwnd; 4993 pinfo.spinfo_srtt = transport->srtt; 4994 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); 4995 pinfo.spinfo_mtu = transport->pathmtu; 4996 4997 if (pinfo.spinfo_state == SCTP_UNKNOWN) 4998 pinfo.spinfo_state = SCTP_ACTIVE; 4999 5000 if (put_user(len, optlen)) { 5001 retval = -EFAULT; 5002 goto out; 5003 } 5004 5005 if (copy_to_user(optval, &pinfo, len)) { 5006 retval = -EFAULT; 5007 goto out; 5008 } 5009 5010 out: 5011 return retval; 5012 } 5013 5014 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) 5015 * 5016 * This option is a on/off flag. If enabled no SCTP message 5017 * fragmentation will be performed. Instead if a message being sent 5018 * exceeds the current PMTU size, the message will NOT be sent and 5019 * instead a error will be indicated to the user. 5020 */ 5021 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, 5022 char __user *optval, int __user *optlen) 5023 { 5024 int val; 5025 5026 if (len < sizeof(int)) 5027 return -EINVAL; 5028 5029 len = sizeof(int); 5030 val = (sctp_sk(sk)->disable_fragments == 1); 5031 if (put_user(len, optlen)) 5032 return -EFAULT; 5033 if (copy_to_user(optval, &val, len)) 5034 return -EFAULT; 5035 return 0; 5036 } 5037 5038 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) 5039 * 5040 * This socket option is used to specify various notifications and 5041 * ancillary data the user wishes to receive. 
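 *
 * Illustrative user-space sketch (assumes <netinet/sctp.h> and an SCTP
 * socket `sd`): read the current subscription, additionally request
 * association change notifications, and write it back.
 *
 *	struct sctp_event_subscribe ev;
 *	socklen_t evlen = sizeof(ev);
 *
 *	if (!getsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, &evlen)) {
 *		ev.sctp_association_event = 1;
 *		setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 *	}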
5042 */ 5043 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 5044 int __user *optlen) 5045 { 5046 if (len == 0) 5047 return -EINVAL; 5048 if (len > sizeof(struct sctp_event_subscribe)) 5049 len = sizeof(struct sctp_event_subscribe); 5050 if (put_user(len, optlen)) 5051 return -EFAULT; 5052 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 5053 return -EFAULT; 5054 return 0; 5055 } 5056 5057 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) 5058 * 5059 * This socket option is applicable to the UDP-style socket only. When 5060 * set it will cause associations that are idle for more than the 5061 * specified number of seconds to automatically close. An association 5062 * being idle is defined an association that has NOT sent or received 5063 * user data. The special value of '0' indicates that no automatic 5064 * close of any associations should be performed. The option expects an 5065 * integer defining the number of seconds of idle time before an 5066 * association is closed. 5067 */ 5068 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) 5069 { 5070 /* Applicable to UDP-style socket only */ 5071 if (sctp_style(sk, TCP)) 5072 return -EOPNOTSUPP; 5073 if (len < sizeof(int)) 5074 return -EINVAL; 5075 len = sizeof(int); 5076 if (put_user(len, optlen)) 5077 return -EFAULT; 5078 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 5079 return -EFAULT; 5080 return 0; 5081 } 5082 5083 /* Helper routine to branch off an association to a new socket. */ 5084 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) 5085 { 5086 struct sctp_association *asoc = sctp_id2assoc(sk, id); 5087 struct sctp_sock *sp = sctp_sk(sk); 5088 struct socket *sock; 5089 int err = 0; 5090 5091 /* Do not peel off from one netns to another one. */ 5092 if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) 5093 return -EINVAL; 5094 5095 if (!asoc) 5096 return -EINVAL; 5097 5098 /* An association cannot be branched off from an already peeled-off 5099 * socket, nor is this supported for tcp style sockets. 5100 */ 5101 if (!sctp_style(sk, UDP)) 5102 return -EINVAL; 5103 5104 /* Create a new socket. */ 5105 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); 5106 if (err < 0) 5107 return err; 5108 5109 sctp_copy_sock(sock->sk, sk, asoc); 5110 5111 /* Make peeled-off sockets more like 1-1 accepted sockets. 5112 * Set the daddr and initialize id to something more random 5113 */ 5114 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); 5115 5116 /* Populate the fields of the newsk from the oldsk and migrate the 5117 * asoc to the newsk. 5118 */ 5119 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); 5120 5121 *sockp = sock; 5122 5123 return err; 5124 } 5125 EXPORT_SYMBOL(sctp_do_peeloff); 5126 5127 static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff, 5128 struct file **newfile, unsigned flags) 5129 { 5130 struct socket *newsock; 5131 int retval; 5132 5133 retval = sctp_do_peeloff(sk, peeloff->associd, &newsock); 5134 if (retval < 0) 5135 goto out; 5136 5137 /* Map the socket to an unused fd that can be returned to the user. 
*/ 5138 retval = get_unused_fd_flags(flags & SOCK_CLOEXEC); 5139 if (retval < 0) { 5140 sock_release(newsock); 5141 goto out; 5142 } 5143 5144 *newfile = sock_alloc_file(newsock, 0, NULL); 5145 if (IS_ERR(*newfile)) { 5146 put_unused_fd(retval); 5147 retval = PTR_ERR(*newfile); 5148 *newfile = NULL; 5149 return retval; 5150 } 5151 5152 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, 5153 retval); 5154 5155 peeloff->sd = retval; 5156 5157 if (flags & SOCK_NONBLOCK) 5158 (*newfile)->f_flags |= O_NONBLOCK; 5159 out: 5160 return retval; 5161 } 5162 5163 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 5164 { 5165 sctp_peeloff_arg_t peeloff; 5166 struct file *newfile = NULL; 5167 int retval = 0; 5168 5169 if (len < sizeof(sctp_peeloff_arg_t)) 5170 return -EINVAL; 5171 len = sizeof(sctp_peeloff_arg_t); 5172 if (copy_from_user(&peeloff, optval, len)) 5173 return -EFAULT; 5174 5175 retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0); 5176 if (retval < 0) 5177 goto out; 5178 5179 /* Return the fd mapped to the new socket. */ 5180 if (put_user(len, optlen)) { 5181 fput(newfile); 5182 put_unused_fd(retval); 5183 return -EFAULT; 5184 } 5185 5186 if (copy_to_user(optval, &peeloff, len)) { 5187 fput(newfile); 5188 put_unused_fd(retval); 5189 return -EFAULT; 5190 } 5191 fd_install(retval, newfile); 5192 out: 5193 return retval; 5194 } 5195 5196 static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len, 5197 char __user *optval, int __user *optlen) 5198 { 5199 sctp_peeloff_flags_arg_t peeloff; 5200 struct file *newfile = NULL; 5201 int retval = 0; 5202 5203 if (len < sizeof(sctp_peeloff_flags_arg_t)) 5204 return -EINVAL; 5205 len = sizeof(sctp_peeloff_flags_arg_t); 5206 if (copy_from_user(&peeloff, optval, len)) 5207 return -EFAULT; 5208 5209 retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg, 5210 &newfile, peeloff.flags); 5211 if (retval < 0) 5212 goto out; 5213 5214 /* Return the fd mapped to the new socket. */ 5215 if (put_user(len, optlen)) { 5216 fput(newfile); 5217 put_unused_fd(retval); 5218 return -EFAULT; 5219 } 5220 5221 if (copy_to_user(optval, &peeloff, len)) { 5222 fput(newfile); 5223 put_unused_fd(retval); 5224 return -EFAULT; 5225 } 5226 fd_install(retval, newfile); 5227 out: 5228 return retval; 5229 } 5230 5231 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) 5232 * 5233 * Applications can enable or disable heartbeats for any peer address of 5234 * an association, modify an address's heartbeat interval, force a 5235 * heartbeat to be sent immediately, and adjust the address's maximum 5236 * number of retransmissions sent before an address is considered 5237 * unreachable. The following structure is used to access and modify an 5238 * address's parameters: 5239 * 5240 * struct sctp_paddrparams { 5241 * sctp_assoc_t spp_assoc_id; 5242 * struct sockaddr_storage spp_address; 5243 * uint32_t spp_hbinterval; 5244 * uint16_t spp_pathmaxrxt; 5245 * uint32_t spp_pathmtu; 5246 * uint32_t spp_sackdelay; 5247 * uint32_t spp_flags; 5248 * }; 5249 * 5250 * spp_assoc_id - (one-to-many style socket) This is filled in the 5251 * application, and identifies the association for 5252 * this query. 5253 * spp_address - This specifies which address is of interest. 5254 * spp_hbinterval - This contains the value of the heartbeat interval, 5255 * in milliseconds. If a value of zero 5256 * is present in this field then no changes are to 5257 * be made to this parameter. 
5258 * spp_pathmaxrxt - This contains the maximum number of 5259 * retransmissions before this address shall be 5260 * considered unreachable. If a value of zero 5261 * is present in this field then no changes are to 5262 * be made to this parameter. 5263 * spp_pathmtu - When Path MTU discovery is disabled the value 5264 * specified here will be the "fixed" path mtu. 5265 * Note that if the spp_address field is empty 5266 * then all associations on this address will 5267 * have this fixed path mtu set upon them. 5268 *
5269 * spp_sackdelay - When delayed sack is enabled, this value specifies 5270 * the number of milliseconds that sacks will be delayed 5271 * for. This value will apply to all addresses of an 5272 * association if the spp_address field is empty. Note 5273 * also that if delayed sack is enabled and this 5274 * value is set to 0, no change is made to the last 5275 * recorded delayed sack timer value. 5276 *
5277 * spp_flags - These flags are used to control various features 5278 * on an association. The flag field may contain 5279 * zero or more of the following options. 5280 *
5281 * SPP_HB_ENABLE - Enable heartbeats on the 5282 * specified address. Note that if the address 5283 * field is empty all addresses for the association 5284 * have heartbeats enabled upon them. 5285 *
5286 * SPP_HB_DISABLE - Disable heartbeats on the 5287 * specified address. Note that if the address 5288 * field is empty all addresses for the association 5289 * will have their heartbeats disabled. Note also 5290 * that SPP_HB_ENABLE and SPP_HB_DISABLE are 5291 * mutually exclusive, only one of these two should 5292 * be specified. Enabling both flags will have 5293 * undetermined results. 5294 *
5295 * SPP_HB_DEMAND - Request a user initiated heartbeat 5296 * to be made immediately. 5297 *
5298 * SPP_PMTUD_ENABLE - This field will enable PMTU 5299 * discovery upon the specified address. Note that 5300 * if the address field is empty then all addresses 5301 * on the association are affected. 5302 *
5303 * SPP_PMTUD_DISABLE - This field will disable PMTU 5304 * discovery upon the specified address. Note that 5305 * if the address field is empty then all addresses 5306 * on the association are affected. Note also that 5307 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually 5308 * exclusive. Enabling both will have undetermined 5309 * results. 5310 *
5311 * SPP_SACKDELAY_ENABLE - Setting this flag turns 5312 * on delayed sack. The time specified in spp_sackdelay 5313 * is used to specify the sack delay for this address. Note 5314 * that if spp_address is empty then all addresses will 5315 * enable delayed sack and take on the sack delay 5316 * value specified in spp_sackdelay. 5317 * SPP_SACKDELAY_DISABLE - Setting this flag turns 5318 * off delayed sack. If the spp_address field is blank then 5319 * delayed sack is disabled for the entire association. Note 5320 * also that this field is mutually exclusive to 5321 * SPP_SACKDELAY_ENABLE, setting both will have undefined 5322 * results.
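 *
 * Illustrative user-space sketch (assumes <netinet/sctp.h>, an SCTP
 * socket `sd` and a known `assoc_id`): leaving spp_address zeroed
 * queries the association-level values described above.
 *
 *	struct sctp_paddrparams pp;
 *	socklen_t pplen = sizeof(pp);
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id = assoc_id;
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, &pplen);
 *	// on success, pp.spp_hbinterval holds the heartbeat interval in ms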
5323 */ 5324 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, 5325 char __user *optval, int __user *optlen) 5326 { 5327 struct sctp_paddrparams params; 5328 struct sctp_transport *trans = NULL; 5329 struct sctp_association *asoc = NULL; 5330 struct sctp_sock *sp = sctp_sk(sk); 5331 5332 if (len < sizeof(struct sctp_paddrparams)) 5333 return -EINVAL; 5334 len = sizeof(struct sctp_paddrparams); 5335 if (copy_from_user(&params, optval, len)) 5336 return -EFAULT; 5337
5338 /* If an address other than INADDR_ANY is specified, and 5339 * no transport is found, then the request is invalid. 5340 */ 5341 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { 5342 trans = sctp_addr_id2transport(sk, &params.spp_address, 5343 params.spp_assoc_id); 5344 if (!trans) { 5345 pr_debug("%s: failed no transport\n", __func__); 5346 return -EINVAL; 5347 } 5348 } 5349
5350 /* Get association, if assoc_id != 0 and the socket is a one 5351 * to many style socket, and an association was not found, then 5352 * the id was invalid. 5353 */ 5354 asoc = sctp_id2assoc(sk, params.spp_assoc_id); 5355 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { 5356 pr_debug("%s: failed no association\n", __func__); 5357 return -EINVAL; 5358 } 5359
5360 if (trans) { 5361 /* Fetch transport values. */ 5362 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); 5363 params.spp_pathmtu = trans->pathmtu; 5364 params.spp_pathmaxrxt = trans->pathmaxrxt; 5365 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); 5366 5367 /*draft-11 doesn't say what to return in spp_flags*/ 5368 params.spp_flags = trans->param_flags; 5369 } else if (asoc) { 5370 /* Fetch association values. */ 5371 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); 5372 params.spp_pathmtu = asoc->pathmtu; 5373 params.spp_pathmaxrxt = asoc->pathmaxrxt; 5374 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); 5375 5376 /*draft-11 doesn't say what to return in spp_flags*/ 5377 params.spp_flags = asoc->param_flags; 5378 } else { 5379 /* Fetch socket values. */ 5380 params.spp_hbinterval = sp->hbinterval; 5381 params.spp_pathmtu = sp->pathmtu; 5382 params.spp_sackdelay = sp->sackdelay; 5383 params.spp_pathmaxrxt = sp->pathmaxrxt; 5384 5385 /*draft-11 doesn't say what to return in spp_flags*/ 5386 params.spp_flags = sp->param_flags; 5387 } 5388
5389 if (copy_to_user(optval, &params, len)) 5390 return -EFAULT; 5391 5392 if (put_user(len, optlen)) 5393 return -EFAULT; 5394 5395 return 0; 5396 } 5397
5398 /* 5399 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) 5400 * 5401 * This option will affect the way delayed acks are performed. This 5402 * option allows you to get or set the delayed ack time, in 5403 * milliseconds. It also allows changing the delayed ack frequency. 5404 * Changing the frequency to 1 disables the delayed sack algorithm. If 5405 * the assoc_id is 0, then this sets or gets the endpoint's default 5406 * values. If the assoc_id field is non-zero, then the set or get 5407 * affects the specified association for the one to many model (the 5408 * assoc_id field is ignored by the one to one model). Note that if 5409 * sack_delay or sack_freq are 0 when setting this option, then the 5410 * current values will remain unchanged. 5411 *
5412 * struct sctp_sack_info { 5413 * sctp_assoc_t sack_assoc_id; 5414 * uint32_t sack_delay; 5415 * uint32_t sack_freq; 5416 * }; 5417 *
5418 * sack_assoc_id - This parameter indicates which association the user 5419 * is performing an action upon.
Note that if this field's value is 5420 * zero then the endpoint's default value is changed (affecting future 5421 * associations only). 5422 *
5423 * sack_delay - This parameter contains the number of milliseconds that 5424 * the user is requesting the delayed ACK timer be set to. Note that 5425 * this value is defined in the standard to be between 200 and 500 5426 * milliseconds. 5427 *
5428 * sack_freq - This parameter contains the number of packets that must 5429 * be received before a sack is sent without waiting for the delay 5430 * timer to expire. The default value for this is 2; setting this 5431 * value to 1 will disable the delayed sack algorithm. 5432 */ 5433 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, 5434 char __user *optval, 5435 int __user *optlen) 5436 { 5437 struct sctp_sack_info params; 5438 struct sctp_association *asoc = NULL; 5439 struct sctp_sock *sp = sctp_sk(sk); 5440
5441 if (len >= sizeof(struct sctp_sack_info)) { 5442 len = sizeof(struct sctp_sack_info); 5443 5444 if (copy_from_user(&params, optval, len)) 5445 return -EFAULT; 5446 } else if (len == sizeof(struct sctp_assoc_value)) { 5447 pr_warn_ratelimited(DEPRECATED 5448 "%s (pid %d) " 5449 "Use of struct sctp_assoc_value in delayed_ack socket option.\n" 5450 "Use struct sctp_sack_info instead\n", 5451 current->comm, task_pid_nr(current)); 5452 if (copy_from_user(&params, optval, len)) 5453 return -EFAULT; 5454 } else 5455 return -EINVAL; 5456
5457 /* Get association, if sack_assoc_id != 0 and the socket is a one 5458 * to many style socket, and an association was not found, then 5459 * the id was invalid. 5460 */ 5461 asoc = sctp_id2assoc(sk, params.sack_assoc_id); 5462 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) 5463 return -EINVAL; 5464
5465 if (asoc) { 5466 /* Fetch association values. */ 5467 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { 5468 params.sack_delay = jiffies_to_msecs( 5469 asoc->sackdelay); 5470 params.sack_freq = asoc->sackfreq; 5471 5472 } else { 5473 params.sack_delay = 0; 5474 params.sack_freq = 1; 5475 } 5476 } else { 5477 /* Fetch socket values. */ 5478 if (sp->param_flags & SPP_SACKDELAY_ENABLE) { 5479 params.sack_delay = sp->sackdelay; 5480 params.sack_freq = sp->sackfreq; 5481 } else { 5482 params.sack_delay = 0; 5483 params.sack_freq = 1; 5484 } 5485 } 5486
5487 if (copy_to_user(optval, &params, len)) 5488 return -EFAULT; 5489 5490 if (put_user(len, optlen)) 5491 return -EFAULT; 5492 5493 return 0; 5494 } 5495
5496 /* 7.1.3 Initialization Parameters (SCTP_INITMSG) 5497 * 5498 * Applications can specify protocol parameters for the default association 5499 * initialization. The option name argument to setsockopt() and getsockopt() 5500 * is SCTP_INITMSG. 5501 * 5502 * Setting initialization parameters is effective only on an unconnected 5503 * socket (for UDP-style sockets only future associations are affected 5504 * by the change). With TCP-style sockets, this option is inherited by 5505 * sockets derived from a listener socket.
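 *
 * Illustrative user-space sketch (assumes <netinet/sctp.h> and an SCTP
 * socket `sd` that has not yet been connected): request up to five
 * outbound streams for future associations.
 *
 *	struct sctp_initmsg im;
 *	socklen_t imlen = sizeof(im);
 *
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, &imlen);
 *	im.sinit_num_ostreams = 5;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));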
5506 */ 5507 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 5508 { 5509 if (len < sizeof(struct sctp_initmsg)) 5510 return -EINVAL; 5511 len = sizeof(struct sctp_initmsg); 5512 if (put_user(len, optlen)) 5513 return -EFAULT; 5514 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 5515 return -EFAULT; 5516 return 0; 5517 } 5518 5519 5520 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, 5521 char __user *optval, int __user *optlen) 5522 { 5523 struct sctp_association *asoc; 5524 int cnt = 0; 5525 struct sctp_getaddrs getaddrs; 5526 struct sctp_transport *from; 5527 void __user *to; 5528 union sctp_addr temp; 5529 struct sctp_sock *sp = sctp_sk(sk); 5530 int addrlen; 5531 size_t space_left; 5532 int bytes_copied; 5533 5534 if (len < sizeof(struct sctp_getaddrs)) 5535 return -EINVAL; 5536 5537 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5538 return -EFAULT; 5539 5540 /* For UDP-style sockets, id specifies the association to query. */ 5541 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5542 if (!asoc) 5543 return -EINVAL; 5544 5545 to = optval + offsetof(struct sctp_getaddrs, addrs); 5546 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5547 5548 list_for_each_entry(from, &asoc->peer.transport_addr_list, 5549 transports) { 5550 memcpy(&temp, &from->ipaddr, sizeof(temp)); 5551 addrlen = sctp_get_pf_specific(sk->sk_family) 5552 ->addr_to_user(sp, &temp); 5553 if (space_left < addrlen) 5554 return -ENOMEM; 5555 if (copy_to_user(to, &temp, addrlen)) 5556 return -EFAULT; 5557 to += addrlen; 5558 cnt++; 5559 space_left -= addrlen; 5560 } 5561 5562 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 5563 return -EFAULT; 5564 bytes_copied = ((char __user *)to) - optval; 5565 if (put_user(bytes_copied, optlen)) 5566 return -EFAULT; 5567 5568 return 0; 5569 } 5570 5571 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, 5572 size_t space_left, int *bytes_copied) 5573 { 5574 struct sctp_sockaddr_entry *addr; 5575 union sctp_addr temp; 5576 int cnt = 0; 5577 int addrlen; 5578 struct net *net = sock_net(sk); 5579 5580 rcu_read_lock(); 5581 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { 5582 if (!addr->valid) 5583 continue; 5584 5585 if ((PF_INET == sk->sk_family) && 5586 (AF_INET6 == addr->a.sa.sa_family)) 5587 continue; 5588 if ((PF_INET6 == sk->sk_family) && 5589 inet_v6_ipv6only(sk) && 5590 (AF_INET == addr->a.sa.sa_family)) 5591 continue; 5592 memcpy(&temp, &addr->a, sizeof(temp)); 5593 if (!temp.v4.sin_port) 5594 temp.v4.sin_port = htons(port); 5595 5596 addrlen = sctp_get_pf_specific(sk->sk_family) 5597 ->addr_to_user(sctp_sk(sk), &temp); 5598 5599 if (space_left < addrlen) { 5600 cnt = -ENOMEM; 5601 break; 5602 } 5603 memcpy(to, &temp, addrlen); 5604 5605 to += addrlen; 5606 cnt++; 5607 space_left -= addrlen; 5608 *bytes_copied += addrlen; 5609 } 5610 rcu_read_unlock(); 5611 5612 return cnt; 5613 } 5614 5615 5616 static int sctp_getsockopt_local_addrs(struct sock *sk, int len, 5617 char __user *optval, int __user *optlen) 5618 { 5619 struct sctp_bind_addr *bp; 5620 struct sctp_association *asoc; 5621 int cnt = 0; 5622 struct sctp_getaddrs getaddrs; 5623 struct sctp_sockaddr_entry *addr; 5624 void __user *to; 5625 union sctp_addr temp; 5626 struct sctp_sock *sp = sctp_sk(sk); 5627 int addrlen; 5628 int err = 0; 5629 size_t space_left; 5630 int bytes_copied = 0; 5631 void *addrs; 5632 void *buf; 5633 5634 if (len < sizeof(struct sctp_getaddrs)) 
5635 return -EINVAL; 5636 5637 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 5638 return -EFAULT; 5639 5640 /* 5641 * For UDP-style sockets, id specifies the association to query. 5642 * If the id field is set to the value '0' then the locally bound 5643 * addresses are returned without regard to any particular 5644 * association. 5645 */ 5646 if (0 == getaddrs.assoc_id) { 5647 bp = &sctp_sk(sk)->ep->base.bind_addr; 5648 } else { 5649 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 5650 if (!asoc) 5651 return -EINVAL; 5652 bp = &asoc->base.bind_addr; 5653 } 5654 5655 to = optval + offsetof(struct sctp_getaddrs, addrs); 5656 space_left = len - offsetof(struct sctp_getaddrs, addrs); 5657 5658 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); 5659 if (!addrs) 5660 return -ENOMEM; 5661 5662 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 5663 * addresses from the global local address list. 5664 */ 5665 if (sctp_list_single_entry(&bp->address_list)) { 5666 addr = list_entry(bp->address_list.next, 5667 struct sctp_sockaddr_entry, list); 5668 if (sctp_is_any(sk, &addr->a)) { 5669 cnt = sctp_copy_laddrs(sk, bp->port, addrs, 5670 space_left, &bytes_copied); 5671 if (cnt < 0) { 5672 err = cnt; 5673 goto out; 5674 } 5675 goto copy_getaddrs; 5676 } 5677 } 5678 5679 buf = addrs; 5680 /* Protection on the bound address list is not needed since 5681 * in the socket option context we hold a socket lock and 5682 * thus the bound address list can't change. 5683 */ 5684 list_for_each_entry(addr, &bp->address_list, list) { 5685 memcpy(&temp, &addr->a, sizeof(temp)); 5686 addrlen = sctp_get_pf_specific(sk->sk_family) 5687 ->addr_to_user(sp, &temp); 5688 if (space_left < addrlen) { 5689 err = -ENOMEM; /*fixme: right error?*/ 5690 goto out; 5691 } 5692 memcpy(buf, &temp, addrlen); 5693 buf += addrlen; 5694 bytes_copied += addrlen; 5695 cnt++; 5696 space_left -= addrlen; 5697 } 5698 5699 copy_getaddrs: 5700 if (copy_to_user(to, addrs, bytes_copied)) { 5701 err = -EFAULT; 5702 goto out; 5703 } 5704 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { 5705 err = -EFAULT; 5706 goto out; 5707 } 5708 if (put_user(bytes_copied, optlen)) 5709 err = -EFAULT; 5710 out: 5711 kfree(addrs); 5712 return err; 5713 } 5714 5715 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) 5716 * 5717 * Requests that the local SCTP stack use the enclosed peer address as 5718 * the association primary. The enclosed address must be one of the 5719 * association peer's addresses. 
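 *
 * Illustrative userspace sketch only (fd is an assumed SCTP socket,
 * assoc_id identifies the association on a one-to-many socket, and
 * handle_primary() is a hypothetical application callback):
 *
 *	struct sctp_prim prim = { .ssp_assoc_id = assoc_id };
 *	socklen_t len = sizeof(prim);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_PRIMARY_ADDR, &prim, &len) == 0)
 *		handle_primary((struct sockaddr *)&prim.ssp_addr);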
 */
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_prim prim;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_prim))
		return -EINVAL;

	len = sizeof(struct sctp_prim);

	if (copy_from_user(&prim, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.primary_path)
		return -ENOTCONN;

	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
	       asoc->peer.primary_path->af_specific->sockaddr_len);

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
			(union sctp_addr *)&prim.ssp_addr);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &prim, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
 *
 * Requests that the local endpoint set the specified Adaptation Layer
 * Indication parameter for all future INIT and INIT-ACK exchanges.
 */
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_setadaptation adaptation;

	if (len < sizeof(struct sctp_setadaptation))
		return -EINVAL;

	len = sizeof(struct sctp_setadaptation);

	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &adaptation, len))
		return -EFAULT;

	return 0;
}

/*
 *
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data. This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 *
 * The application that wishes to use this socket option simply passes
 * the sctp_sndrcvinfo structure (defined in Section 5.2.2) to this
 * call. The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context and
 * sinfo_timetolive. The user must provide the sinfo_assoc_id field to
 * this call if the caller is using the UDP model.
 *
 * For getsockopt, it gets the default sctp_sndrcvinfo structure.
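 *
 * A rough userspace sketch of reading the defaults (illustrative only;
 * fd is an assumed SCTP socket and assoc_id is needed only for the UDP
 * model):
 *
 *	struct sctp_sndrcvinfo info = { .sinfo_assoc_id = assoc_id };
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &info, &len) == 0)
 *		printf("stream=%u ttl=%u\n", info.sinfo_stream,
 *		       info.sinfo_timetolive);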
5801 */ 5802 static int sctp_getsockopt_default_send_param(struct sock *sk, 5803 int len, char __user *optval, 5804 int __user *optlen) 5805 { 5806 struct sctp_sock *sp = sctp_sk(sk); 5807 struct sctp_association *asoc; 5808 struct sctp_sndrcvinfo info; 5809 5810 if (len < sizeof(info)) 5811 return -EINVAL; 5812 5813 len = sizeof(info); 5814 5815 if (copy_from_user(&info, optval, len)) 5816 return -EFAULT; 5817 5818 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 5819 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) 5820 return -EINVAL; 5821 if (asoc) { 5822 info.sinfo_stream = asoc->default_stream; 5823 info.sinfo_flags = asoc->default_flags; 5824 info.sinfo_ppid = asoc->default_ppid; 5825 info.sinfo_context = asoc->default_context; 5826 info.sinfo_timetolive = asoc->default_timetolive; 5827 } else { 5828 info.sinfo_stream = sp->default_stream; 5829 info.sinfo_flags = sp->default_flags; 5830 info.sinfo_ppid = sp->default_ppid; 5831 info.sinfo_context = sp->default_context; 5832 info.sinfo_timetolive = sp->default_timetolive; 5833 } 5834 5835 if (put_user(len, optlen)) 5836 return -EFAULT; 5837 if (copy_to_user(optval, &info, len)) 5838 return -EFAULT; 5839 5840 return 0; 5841 } 5842 5843 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters 5844 * (SCTP_DEFAULT_SNDINFO) 5845 */ 5846 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, 5847 char __user *optval, 5848 int __user *optlen) 5849 { 5850 struct sctp_sock *sp = sctp_sk(sk); 5851 struct sctp_association *asoc; 5852 struct sctp_sndinfo info; 5853 5854 if (len < sizeof(info)) 5855 return -EINVAL; 5856 5857 len = sizeof(info); 5858 5859 if (copy_from_user(&info, optval, len)) 5860 return -EFAULT; 5861 5862 asoc = sctp_id2assoc(sk, info.snd_assoc_id); 5863 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) 5864 return -EINVAL; 5865 if (asoc) { 5866 info.snd_sid = asoc->default_stream; 5867 info.snd_flags = asoc->default_flags; 5868 info.snd_ppid = asoc->default_ppid; 5869 info.snd_context = asoc->default_context; 5870 } else { 5871 info.snd_sid = sp->default_stream; 5872 info.snd_flags = sp->default_flags; 5873 info.snd_ppid = sp->default_ppid; 5874 info.snd_context = sp->default_context; 5875 } 5876 5877 if (put_user(len, optlen)) 5878 return -EFAULT; 5879 if (copy_to_user(optval, &info, len)) 5880 return -EFAULT; 5881 5882 return 0; 5883 } 5884 5885 /* 5886 * 5887 * 7.1.5 SCTP_NODELAY 5888 * 5889 * Turn on/off any Nagle-like algorithm. This means that packets are 5890 * generally sent as soon as possible and no unnecessary delays are 5891 * introduced, at the cost of more packets in the network. Expects an 5892 * integer boolean flag. 5893 */ 5894 5895 static int sctp_getsockopt_nodelay(struct sock *sk, int len, 5896 char __user *optval, int __user *optlen) 5897 { 5898 int val; 5899 5900 if (len < sizeof(int)) 5901 return -EINVAL; 5902 5903 len = sizeof(int); 5904 val = (sctp_sk(sk)->nodelay == 1); 5905 if (put_user(len, optlen)) 5906 return -EFAULT; 5907 if (copy_to_user(optval, &val, len)) 5908 return -EFAULT; 5909 return 0; 5910 } 5911 5912 /* 5913 * 5914 * 7.1.1 SCTP_RTOINFO 5915 * 5916 * The protocol parameters used to initialize and bound retransmission 5917 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access 5918 * and modify these parameters. 5919 * All parameters are time values, in milliseconds. A value of 0, when 5920 * modifying the parameters, indicates that the current value should not 5921 * be changed. 
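 *
 * As a rough userspace illustration (fd is an assumed SCTP socket; an
 * assoc_id of 0 reads the endpoint defaults on a one-to-many socket):
 *
 *	struct sctp_rtoinfo rto = { .srto_assoc_id = assoc_id };
 *	socklen_t len = sizeof(rto);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_RTOINFO, &rto, &len) == 0)
 *		printf("rto init=%u min=%u max=%u ms\n",
 *		       rto.srto_initial, rto.srto_min, rto.srto_max);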
5922 * 5923 */ 5924 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, 5925 char __user *optval, 5926 int __user *optlen) { 5927 struct sctp_rtoinfo rtoinfo; 5928 struct sctp_association *asoc; 5929 5930 if (len < sizeof (struct sctp_rtoinfo)) 5931 return -EINVAL; 5932 5933 len = sizeof(struct sctp_rtoinfo); 5934 5935 if (copy_from_user(&rtoinfo, optval, len)) 5936 return -EFAULT; 5937 5938 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 5939 5940 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) 5941 return -EINVAL; 5942 5943 /* Values corresponding to the specific association. */ 5944 if (asoc) { 5945 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); 5946 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); 5947 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); 5948 } else { 5949 /* Values corresponding to the endpoint. */ 5950 struct sctp_sock *sp = sctp_sk(sk); 5951 5952 rtoinfo.srto_initial = sp->rtoinfo.srto_initial; 5953 rtoinfo.srto_max = sp->rtoinfo.srto_max; 5954 rtoinfo.srto_min = sp->rtoinfo.srto_min; 5955 } 5956 5957 if (put_user(len, optlen)) 5958 return -EFAULT; 5959 5960 if (copy_to_user(optval, &rtoinfo, len)) 5961 return -EFAULT; 5962 5963 return 0; 5964 } 5965 5966 /* 5967 * 5968 * 7.1.2 SCTP_ASSOCINFO 5969 * 5970 * This option is used to tune the maximum retransmission attempts 5971 * of the association. 5972 * Returns an error if the new association retransmission value is 5973 * greater than the sum of the retransmission value of the peer. 5974 * See [SCTP] for more information. 5975 * 5976 */ 5977 static int sctp_getsockopt_associnfo(struct sock *sk, int len, 5978 char __user *optval, 5979 int __user *optlen) 5980 { 5981 5982 struct sctp_assocparams assocparams; 5983 struct sctp_association *asoc; 5984 struct list_head *pos; 5985 int cnt = 0; 5986 5987 if (len < sizeof (struct sctp_assocparams)) 5988 return -EINVAL; 5989 5990 len = sizeof(struct sctp_assocparams); 5991 5992 if (copy_from_user(&assocparams, optval, len)) 5993 return -EFAULT; 5994 5995 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 5996 5997 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) 5998 return -EINVAL; 5999 6000 /* Values correspoinding to the specific association */ 6001 if (asoc) { 6002 assocparams.sasoc_asocmaxrxt = asoc->max_retrans; 6003 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; 6004 assocparams.sasoc_local_rwnd = asoc->a_rwnd; 6005 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); 6006 6007 list_for_each(pos, &asoc->peer.transport_addr_list) { 6008 cnt++; 6009 } 6010 6011 assocparams.sasoc_number_peer_destinations = cnt; 6012 } else { 6013 /* Values corresponding to the endpoint */ 6014 struct sctp_sock *sp = sctp_sk(sk); 6015 6016 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; 6017 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; 6018 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; 6019 assocparams.sasoc_cookie_life = 6020 sp->assocparams.sasoc_cookie_life; 6021 assocparams.sasoc_number_peer_destinations = 6022 sp->assocparams. 6023 sasoc_number_peer_destinations; 6024 } 6025 6026 if (put_user(len, optlen)) 6027 return -EFAULT; 6028 6029 if (copy_to_user(optval, &assocparams, len)) 6030 return -EFAULT; 6031 6032 return 0; 6033 } 6034 6035 /* 6036 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) 6037 * 6038 * This socket option is a boolean flag which turns on or off mapped V4 6039 * addresses. 
If this option is turned on and the socket is type 6040 * PF_INET6, then IPv4 addresses will be mapped to V6 representation. 6041 * If this option is turned off, then no mapping will be done of V4 6042 * addresses and a user will receive both PF_INET6 and PF_INET type 6043 * addresses on the socket. 6044 */ 6045 static int sctp_getsockopt_mappedv4(struct sock *sk, int len, 6046 char __user *optval, int __user *optlen) 6047 { 6048 int val; 6049 struct sctp_sock *sp = sctp_sk(sk); 6050 6051 if (len < sizeof(int)) 6052 return -EINVAL; 6053 6054 len = sizeof(int); 6055 val = sp->v4mapped; 6056 if (put_user(len, optlen)) 6057 return -EFAULT; 6058 if (copy_to_user(optval, &val, len)) 6059 return -EFAULT; 6060 6061 return 0; 6062 } 6063 6064 /* 6065 * 7.1.29. Set or Get the default context (SCTP_CONTEXT) 6066 * (chapter and verse is quoted at sctp_setsockopt_context()) 6067 */ 6068 static int sctp_getsockopt_context(struct sock *sk, int len, 6069 char __user *optval, int __user *optlen) 6070 { 6071 struct sctp_assoc_value params; 6072 struct sctp_sock *sp; 6073 struct sctp_association *asoc; 6074 6075 if (len < sizeof(struct sctp_assoc_value)) 6076 return -EINVAL; 6077 6078 len = sizeof(struct sctp_assoc_value); 6079 6080 if (copy_from_user(¶ms, optval, len)) 6081 return -EFAULT; 6082 6083 sp = sctp_sk(sk); 6084 6085 if (params.assoc_id != 0) { 6086 asoc = sctp_id2assoc(sk, params.assoc_id); 6087 if (!asoc) 6088 return -EINVAL; 6089 params.assoc_value = asoc->default_rcv_context; 6090 } else { 6091 params.assoc_value = sp->default_rcv_context; 6092 } 6093 6094 if (put_user(len, optlen)) 6095 return -EFAULT; 6096 if (copy_to_user(optval, ¶ms, len)) 6097 return -EFAULT; 6098 6099 return 0; 6100 } 6101 6102 /* 6103 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) 6104 * This option will get or set the maximum size to put in any outgoing 6105 * SCTP DATA chunk. If a message is larger than this size it will be 6106 * fragmented by SCTP into the specified size. Note that the underlying 6107 * SCTP implementation may fragment into smaller sized chunks when the 6108 * PMTU of the underlying association is smaller than the value set by 6109 * the user. The default value for this option is '0' which indicates 6110 * the user is NOT limiting fragmentation and only the PMTU will effect 6111 * SCTP's choice of DATA chunk size. Note also that values set larger 6112 * than the maximum size of an IP datagram will effectively let SCTP 6113 * control fragmentation (i.e. the same as setting this option to 0). 6114 * 6115 * The following structure is used to access and modify this parameter: 6116 * 6117 * struct sctp_assoc_value { 6118 * sctp_assoc_t assoc_id; 6119 * uint32_t assoc_value; 6120 * }; 6121 * 6122 * assoc_id: This parameter is ignored for one-to-one style sockets. 6123 * For one-to-many style sockets this parameter indicates which 6124 * association the user is performing an action upon. Note that if 6125 * this field's value is zero then the endpoints default value is 6126 * changed (effecting future associations only). 6127 * assoc_value: This parameter specifies the maximum size in bytes. 
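 *
 * A hedged userspace sketch of reading the current value (fd is an
 * assumed SCTP socket; an assoc_id of 0 queries the endpoint default):
 *
 *	struct sctp_assoc_value av = { .assoc_id = assoc_id };
 *	socklen_t len = sizeof(av);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_MAXSEG, &av, &len) == 0)
 *		printf("max fragment size: %u bytes\n", av.assoc_value);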
6128 */ 6129 static int sctp_getsockopt_maxseg(struct sock *sk, int len, 6130 char __user *optval, int __user *optlen) 6131 { 6132 struct sctp_assoc_value params; 6133 struct sctp_association *asoc; 6134 6135 if (len == sizeof(int)) { 6136 pr_warn_ratelimited(DEPRECATED 6137 "%s (pid %d) " 6138 "Use of int in maxseg socket option.\n" 6139 "Use struct sctp_assoc_value instead\n", 6140 current->comm, task_pid_nr(current)); 6141 params.assoc_id = 0; 6142 } else if (len >= sizeof(struct sctp_assoc_value)) { 6143 len = sizeof(struct sctp_assoc_value); 6144 if (copy_from_user(¶ms, optval, sizeof(params))) 6145 return -EFAULT; 6146 } else 6147 return -EINVAL; 6148 6149 asoc = sctp_id2assoc(sk, params.assoc_id); 6150 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 6151 return -EINVAL; 6152 6153 if (asoc) 6154 params.assoc_value = asoc->frag_point; 6155 else 6156 params.assoc_value = sctp_sk(sk)->user_frag; 6157 6158 if (put_user(len, optlen)) 6159 return -EFAULT; 6160 if (len == sizeof(int)) { 6161 if (copy_to_user(optval, ¶ms.assoc_value, len)) 6162 return -EFAULT; 6163 } else { 6164 if (copy_to_user(optval, ¶ms, len)) 6165 return -EFAULT; 6166 } 6167 6168 return 0; 6169 } 6170 6171 /* 6172 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) 6173 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) 6174 */ 6175 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, 6176 char __user *optval, int __user *optlen) 6177 { 6178 int val; 6179 6180 if (len < sizeof(int)) 6181 return -EINVAL; 6182 6183 len = sizeof(int); 6184 6185 val = sctp_sk(sk)->frag_interleave; 6186 if (put_user(len, optlen)) 6187 return -EFAULT; 6188 if (copy_to_user(optval, &val, len)) 6189 return -EFAULT; 6190 6191 return 0; 6192 } 6193 6194 /* 6195 * 7.1.25. Set or Get the sctp partial delivery point 6196 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) 6197 */ 6198 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, 6199 char __user *optval, 6200 int __user *optlen) 6201 { 6202 u32 val; 6203 6204 if (len < sizeof(u32)) 6205 return -EINVAL; 6206 6207 len = sizeof(u32); 6208 6209 val = sctp_sk(sk)->pd_point; 6210 if (put_user(len, optlen)) 6211 return -EFAULT; 6212 if (copy_to_user(optval, &val, len)) 6213 return -EFAULT; 6214 6215 return 0; 6216 } 6217 6218 /* 6219 * 7.1.28. 
Set or Get the maximum burst (SCTP_MAX_BURST) 6220 * (chapter and verse is quoted at sctp_setsockopt_maxburst()) 6221 */ 6222 static int sctp_getsockopt_maxburst(struct sock *sk, int len, 6223 char __user *optval, 6224 int __user *optlen) 6225 { 6226 struct sctp_assoc_value params; 6227 struct sctp_sock *sp; 6228 struct sctp_association *asoc; 6229 6230 if (len == sizeof(int)) { 6231 pr_warn_ratelimited(DEPRECATED 6232 "%s (pid %d) " 6233 "Use of int in max_burst socket option.\n" 6234 "Use struct sctp_assoc_value instead\n", 6235 current->comm, task_pid_nr(current)); 6236 params.assoc_id = 0; 6237 } else if (len >= sizeof(struct sctp_assoc_value)) { 6238 len = sizeof(struct sctp_assoc_value); 6239 if (copy_from_user(¶ms, optval, len)) 6240 return -EFAULT; 6241 } else 6242 return -EINVAL; 6243 6244 sp = sctp_sk(sk); 6245 6246 if (params.assoc_id != 0) { 6247 asoc = sctp_id2assoc(sk, params.assoc_id); 6248 if (!asoc) 6249 return -EINVAL; 6250 params.assoc_value = asoc->max_burst; 6251 } else 6252 params.assoc_value = sp->max_burst; 6253 6254 if (len == sizeof(int)) { 6255 if (copy_to_user(optval, ¶ms.assoc_value, len)) 6256 return -EFAULT; 6257 } else { 6258 if (copy_to_user(optval, ¶ms, len)) 6259 return -EFAULT; 6260 } 6261 6262 return 0; 6263 6264 } 6265 6266 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 6267 char __user *optval, int __user *optlen) 6268 { 6269 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6270 struct sctp_hmacalgo __user *p = (void __user *)optval; 6271 struct sctp_hmac_algo_param *hmacs; 6272 __u16 data_len = 0; 6273 u32 num_idents; 6274 int i; 6275 6276 if (!ep->auth_enable) 6277 return -EACCES; 6278 6279 hmacs = ep->auth_hmacs_list; 6280 data_len = ntohs(hmacs->param_hdr.length) - 6281 sizeof(struct sctp_paramhdr); 6282 6283 if (len < sizeof(struct sctp_hmacalgo) + data_len) 6284 return -EINVAL; 6285 6286 len = sizeof(struct sctp_hmacalgo) + data_len; 6287 num_idents = data_len / sizeof(u16); 6288 6289 if (put_user(len, optlen)) 6290 return -EFAULT; 6291 if (put_user(num_idents, &p->shmac_num_idents)) 6292 return -EFAULT; 6293 for (i = 0; i < num_idents; i++) { 6294 __u16 hmacid = ntohs(hmacs->hmac_ids[i]); 6295 6296 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) 6297 return -EFAULT; 6298 } 6299 return 0; 6300 } 6301 6302 static int sctp_getsockopt_active_key(struct sock *sk, int len, 6303 char __user *optval, int __user *optlen) 6304 { 6305 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6306 struct sctp_authkeyid val; 6307 struct sctp_association *asoc; 6308 6309 if (!ep->auth_enable) 6310 return -EACCES; 6311 6312 if (len < sizeof(struct sctp_authkeyid)) 6313 return -EINVAL; 6314 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6315 return -EFAULT; 6316 6317 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6318 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) 6319 return -EINVAL; 6320 6321 if (asoc) 6322 val.scact_keynumber = asoc->active_key_id; 6323 else 6324 val.scact_keynumber = ep->active_key_id; 6325 6326 len = sizeof(struct sctp_authkeyid); 6327 if (put_user(len, optlen)) 6328 return -EFAULT; 6329 if (copy_to_user(optval, &val, len)) 6330 return -EFAULT; 6331 6332 return 0; 6333 } 6334 6335 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 6336 char __user *optval, int __user *optlen) 6337 { 6338 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6339 struct sctp_authchunks __user *p = (void __user *)optval; 6340 struct sctp_authchunks val; 6341 struct sctp_association *asoc; 6342 struct 
sctp_chunks_param *ch; 6343 u32 num_chunks = 0; 6344 char __user *to; 6345 6346 if (!ep->auth_enable) 6347 return -EACCES; 6348 6349 if (len < sizeof(struct sctp_authchunks)) 6350 return -EINVAL; 6351 6352 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6353 return -EFAULT; 6354 6355 to = p->gauth_chunks; 6356 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 6357 if (!asoc) 6358 return -EINVAL; 6359 6360 ch = asoc->peer.peer_chunks; 6361 if (!ch) 6362 goto num; 6363 6364 /* See if the user provided enough room for all the data */ 6365 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr); 6366 if (len < num_chunks) 6367 return -EINVAL; 6368 6369 if (copy_to_user(to, ch->chunks, num_chunks)) 6370 return -EFAULT; 6371 num: 6372 len = sizeof(struct sctp_authchunks) + num_chunks; 6373 if (put_user(len, optlen)) 6374 return -EFAULT; 6375 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 6376 return -EFAULT; 6377 return 0; 6378 } 6379 6380 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 6381 char __user *optval, int __user *optlen) 6382 { 6383 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 6384 struct sctp_authchunks __user *p = (void __user *)optval; 6385 struct sctp_authchunks val; 6386 struct sctp_association *asoc; 6387 struct sctp_chunks_param *ch; 6388 u32 num_chunks = 0; 6389 char __user *to; 6390 6391 if (!ep->auth_enable) 6392 return -EACCES; 6393 6394 if (len < sizeof(struct sctp_authchunks)) 6395 return -EINVAL; 6396 6397 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6398 return -EFAULT; 6399 6400 to = p->gauth_chunks; 6401 asoc = sctp_id2assoc(sk, val.gauth_assoc_id); 6402 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) 6403 return -EINVAL; 6404 6405 if (asoc) 6406 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; 6407 else 6408 ch = ep->auth_chunk_list; 6409 6410 if (!ch) 6411 goto num; 6412 6413 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr); 6414 if (len < sizeof(struct sctp_authchunks) + num_chunks) 6415 return -EINVAL; 6416 6417 if (copy_to_user(to, ch->chunks, num_chunks)) 6418 return -EFAULT; 6419 num: 6420 len = sizeof(struct sctp_authchunks) + num_chunks; 6421 if (put_user(len, optlen)) 6422 return -EFAULT; 6423 if (put_user(num_chunks, &p->gauth_number_of_chunks)) 6424 return -EFAULT; 6425 6426 return 0; 6427 } 6428 6429 /* 6430 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) 6431 * This option gets the current number of associations that are attached 6432 * to a one-to-many style socket. The option value is an uint32_t. 
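 *
 * A minimal userspace sketch (fd is an assumed one-to-many SCTP socket):
 *
 *	uint32_t assocs = 0;
 *	socklen_t len = sizeof(assocs);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assocs, &len) == 0)
 *		printf("%u associations\n", assocs);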
6433 */ 6434 static int sctp_getsockopt_assoc_number(struct sock *sk, int len, 6435 char __user *optval, int __user *optlen) 6436 { 6437 struct sctp_sock *sp = sctp_sk(sk); 6438 struct sctp_association *asoc; 6439 u32 val = 0; 6440 6441 if (sctp_style(sk, TCP)) 6442 return -EOPNOTSUPP; 6443 6444 if (len < sizeof(u32)) 6445 return -EINVAL; 6446 6447 len = sizeof(u32); 6448 6449 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6450 val++; 6451 } 6452 6453 if (put_user(len, optlen)) 6454 return -EFAULT; 6455 if (copy_to_user(optval, &val, len)) 6456 return -EFAULT; 6457 6458 return 0; 6459 } 6460 6461 /* 6462 * 8.1.23 SCTP_AUTO_ASCONF 6463 * See the corresponding setsockopt entry as description 6464 */ 6465 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, 6466 char __user *optval, int __user *optlen) 6467 { 6468 int val = 0; 6469 6470 if (len < sizeof(int)) 6471 return -EINVAL; 6472 6473 len = sizeof(int); 6474 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) 6475 val = 1; 6476 if (put_user(len, optlen)) 6477 return -EFAULT; 6478 if (copy_to_user(optval, &val, len)) 6479 return -EFAULT; 6480 return 0; 6481 } 6482 6483 /* 6484 * 8.2.6. Get the Current Identifiers of Associations 6485 * (SCTP_GET_ASSOC_ID_LIST) 6486 * 6487 * This option gets the current list of SCTP association identifiers of 6488 * the SCTP associations handled by a one-to-many style socket. 6489 */ 6490 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, 6491 char __user *optval, int __user *optlen) 6492 { 6493 struct sctp_sock *sp = sctp_sk(sk); 6494 struct sctp_association *asoc; 6495 struct sctp_assoc_ids *ids; 6496 u32 num = 0; 6497 6498 if (sctp_style(sk, TCP)) 6499 return -EOPNOTSUPP; 6500 6501 if (len < sizeof(struct sctp_assoc_ids)) 6502 return -EINVAL; 6503 6504 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6505 num++; 6506 } 6507 6508 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) 6509 return -EINVAL; 6510 6511 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; 6512 6513 ids = kmalloc(len, GFP_USER | __GFP_NOWARN); 6514 if (unlikely(!ids)) 6515 return -ENOMEM; 6516 6517 ids->gaids_number_of_ids = num; 6518 num = 0; 6519 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { 6520 ids->gaids_assoc_id[num++] = asoc->assoc_id; 6521 } 6522 6523 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { 6524 kfree(ids); 6525 return -EFAULT; 6526 } 6527 6528 kfree(ids); 6529 return 0; 6530 } 6531 6532 /* 6533 * SCTP_PEER_ADDR_THLDS 6534 * 6535 * This option allows us to fetch the partially failed threshold for one or all 6536 * transports in an association. 
See Section 6.1 of: 6537 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt 6538 */ 6539 static int sctp_getsockopt_paddr_thresholds(struct sock *sk, 6540 char __user *optval, 6541 int len, 6542 int __user *optlen) 6543 { 6544 struct sctp_paddrthlds val; 6545 struct sctp_transport *trans; 6546 struct sctp_association *asoc; 6547 6548 if (len < sizeof(struct sctp_paddrthlds)) 6549 return -EINVAL; 6550 len = sizeof(struct sctp_paddrthlds); 6551 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) 6552 return -EFAULT; 6553 6554 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { 6555 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 6556 if (!asoc) 6557 return -ENOENT; 6558 6559 val.spt_pathpfthld = asoc->pf_retrans; 6560 val.spt_pathmaxrxt = asoc->pathmaxrxt; 6561 } else { 6562 trans = sctp_addr_id2transport(sk, &val.spt_address, 6563 val.spt_assoc_id); 6564 if (!trans) 6565 return -ENOENT; 6566 6567 val.spt_pathmaxrxt = trans->pathmaxrxt; 6568 val.spt_pathpfthld = trans->pf_retrans; 6569 } 6570 6571 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 6572 return -EFAULT; 6573 6574 return 0; 6575 } 6576 6577 /* 6578 * SCTP_GET_ASSOC_STATS 6579 * 6580 * This option retrieves local per endpoint statistics. It is modeled 6581 * after OpenSolaris' implementation 6582 */ 6583 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, 6584 char __user *optval, 6585 int __user *optlen) 6586 { 6587 struct sctp_assoc_stats sas; 6588 struct sctp_association *asoc = NULL; 6589 6590 /* User must provide at least the assoc id */ 6591 if (len < sizeof(sctp_assoc_t)) 6592 return -EINVAL; 6593 6594 /* Allow the struct to grow and fill in as much as possible */ 6595 len = min_t(size_t, len, sizeof(sas)); 6596 6597 if (copy_from_user(&sas, optval, len)) 6598 return -EFAULT; 6599 6600 asoc = sctp_id2assoc(sk, sas.sas_assoc_id); 6601 if (!asoc) 6602 return -EINVAL; 6603 6604 sas.sas_rtxchunks = asoc->stats.rtxchunks; 6605 sas.sas_gapcnt = asoc->stats.gapcnt; 6606 sas.sas_outofseqtsns = asoc->stats.outofseqtsns; 6607 sas.sas_osacks = asoc->stats.osacks; 6608 sas.sas_isacks = asoc->stats.isacks; 6609 sas.sas_octrlchunks = asoc->stats.octrlchunks; 6610 sas.sas_ictrlchunks = asoc->stats.ictrlchunks; 6611 sas.sas_oodchunks = asoc->stats.oodchunks; 6612 sas.sas_iodchunks = asoc->stats.iodchunks; 6613 sas.sas_ouodchunks = asoc->stats.ouodchunks; 6614 sas.sas_iuodchunks = asoc->stats.iuodchunks; 6615 sas.sas_idupchunks = asoc->stats.idupchunks; 6616 sas.sas_opackets = asoc->stats.opackets; 6617 sas.sas_ipackets = asoc->stats.ipackets; 6618 6619 /* New high max rto observed, will return 0 if not a single 6620 * RTO update took place. 
obs_rto_ipaddr will be bogus 6621 * in such a case 6622 */ 6623 sas.sas_maxrto = asoc->stats.max_obs_rto; 6624 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, 6625 sizeof(struct sockaddr_storage)); 6626 6627 /* Mark beginning of a new observation period */ 6628 asoc->stats.max_obs_rto = asoc->rto_min; 6629 6630 if (put_user(len, optlen)) 6631 return -EFAULT; 6632 6633 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); 6634 6635 if (copy_to_user(optval, &sas, len)) 6636 return -EFAULT; 6637 6638 return 0; 6639 } 6640 6641 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, 6642 char __user *optval, 6643 int __user *optlen) 6644 { 6645 int val = 0; 6646 6647 if (len < sizeof(int)) 6648 return -EINVAL; 6649 6650 len = sizeof(int); 6651 if (sctp_sk(sk)->recvrcvinfo) 6652 val = 1; 6653 if (put_user(len, optlen)) 6654 return -EFAULT; 6655 if (copy_to_user(optval, &val, len)) 6656 return -EFAULT; 6657 6658 return 0; 6659 } 6660 6661 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, 6662 char __user *optval, 6663 int __user *optlen) 6664 { 6665 int val = 0; 6666 6667 if (len < sizeof(int)) 6668 return -EINVAL; 6669 6670 len = sizeof(int); 6671 if (sctp_sk(sk)->recvnxtinfo) 6672 val = 1; 6673 if (put_user(len, optlen)) 6674 return -EFAULT; 6675 if (copy_to_user(optval, &val, len)) 6676 return -EFAULT; 6677 6678 return 0; 6679 } 6680 6681 static int sctp_getsockopt_pr_supported(struct sock *sk, int len, 6682 char __user *optval, 6683 int __user *optlen) 6684 { 6685 struct sctp_assoc_value params; 6686 struct sctp_association *asoc; 6687 int retval = -EFAULT; 6688 6689 if (len < sizeof(params)) { 6690 retval = -EINVAL; 6691 goto out; 6692 } 6693 6694 len = sizeof(params); 6695 if (copy_from_user(¶ms, optval, len)) 6696 goto out; 6697 6698 asoc = sctp_id2assoc(sk, params.assoc_id); 6699 if (asoc) { 6700 params.assoc_value = asoc->prsctp_enable; 6701 } else if (!params.assoc_id) { 6702 struct sctp_sock *sp = sctp_sk(sk); 6703 6704 params.assoc_value = sp->ep->prsctp_enable; 6705 } else { 6706 retval = -EINVAL; 6707 goto out; 6708 } 6709 6710 if (put_user(len, optlen)) 6711 goto out; 6712 6713 if (copy_to_user(optval, ¶ms, len)) 6714 goto out; 6715 6716 retval = 0; 6717 6718 out: 6719 return retval; 6720 } 6721 6722 static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, 6723 char __user *optval, 6724 int __user *optlen) 6725 { 6726 struct sctp_default_prinfo info; 6727 struct sctp_association *asoc; 6728 int retval = -EFAULT; 6729 6730 if (len < sizeof(info)) { 6731 retval = -EINVAL; 6732 goto out; 6733 } 6734 6735 len = sizeof(info); 6736 if (copy_from_user(&info, optval, len)) 6737 goto out; 6738 6739 asoc = sctp_id2assoc(sk, info.pr_assoc_id); 6740 if (asoc) { 6741 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); 6742 info.pr_value = asoc->default_timetolive; 6743 } else if (!info.pr_assoc_id) { 6744 struct sctp_sock *sp = sctp_sk(sk); 6745 6746 info.pr_policy = SCTP_PR_POLICY(sp->default_flags); 6747 info.pr_value = sp->default_timetolive; 6748 } else { 6749 retval = -EINVAL; 6750 goto out; 6751 } 6752 6753 if (put_user(len, optlen)) 6754 goto out; 6755 6756 if (copy_to_user(optval, &info, len)) 6757 goto out; 6758 6759 retval = 0; 6760 6761 out: 6762 return retval; 6763 } 6764 6765 static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, 6766 char __user *optval, 6767 int __user *optlen) 6768 { 6769 struct sctp_prstatus params; 6770 struct sctp_association *asoc; 6771 int policy; 6772 int retval = -EINVAL; 
6773 6774 if (len < sizeof(params)) 6775 goto out; 6776 6777 len = sizeof(params); 6778 if (copy_from_user(¶ms, optval, len)) { 6779 retval = -EFAULT; 6780 goto out; 6781 } 6782 6783 policy = params.sprstat_policy; 6784 if (policy & ~SCTP_PR_SCTP_MASK) 6785 goto out; 6786 6787 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); 6788 if (!asoc) 6789 goto out; 6790 6791 if (policy == SCTP_PR_SCTP_NONE) { 6792 params.sprstat_abandoned_unsent = 0; 6793 params.sprstat_abandoned_sent = 0; 6794 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { 6795 params.sprstat_abandoned_unsent += 6796 asoc->abandoned_unsent[policy]; 6797 params.sprstat_abandoned_sent += 6798 asoc->abandoned_sent[policy]; 6799 } 6800 } else { 6801 params.sprstat_abandoned_unsent = 6802 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; 6803 params.sprstat_abandoned_sent = 6804 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; 6805 } 6806 6807 if (put_user(len, optlen)) { 6808 retval = -EFAULT; 6809 goto out; 6810 } 6811 6812 if (copy_to_user(optval, ¶ms, len)) { 6813 retval = -EFAULT; 6814 goto out; 6815 } 6816 6817 retval = 0; 6818 6819 out: 6820 return retval; 6821 } 6822 6823 static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, 6824 char __user *optval, 6825 int __user *optlen) 6826 { 6827 struct sctp_stream_out_ext *streamoute; 6828 struct sctp_association *asoc; 6829 struct sctp_prstatus params; 6830 int retval = -EINVAL; 6831 int policy; 6832 6833 if (len < sizeof(params)) 6834 goto out; 6835 6836 len = sizeof(params); 6837 if (copy_from_user(¶ms, optval, len)) { 6838 retval = -EFAULT; 6839 goto out; 6840 } 6841 6842 policy = params.sprstat_policy; 6843 if (policy & ~SCTP_PR_SCTP_MASK) 6844 goto out; 6845 6846 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); 6847 if (!asoc || params.sprstat_sid >= asoc->stream.outcnt) 6848 goto out; 6849 6850 streamoute = asoc->stream.out[params.sprstat_sid].ext; 6851 if (!streamoute) { 6852 /* Not allocated yet, means all stats are 0 */ 6853 params.sprstat_abandoned_unsent = 0; 6854 params.sprstat_abandoned_sent = 0; 6855 retval = 0; 6856 goto out; 6857 } 6858 6859 if (policy == SCTP_PR_SCTP_NONE) { 6860 params.sprstat_abandoned_unsent = 0; 6861 params.sprstat_abandoned_sent = 0; 6862 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { 6863 params.sprstat_abandoned_unsent += 6864 streamoute->abandoned_unsent[policy]; 6865 params.sprstat_abandoned_sent += 6866 streamoute->abandoned_sent[policy]; 6867 } 6868 } else { 6869 params.sprstat_abandoned_unsent = 6870 streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)]; 6871 params.sprstat_abandoned_sent = 6872 streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)]; 6873 } 6874 6875 if (put_user(len, optlen) || copy_to_user(optval, ¶ms, len)) { 6876 retval = -EFAULT; 6877 goto out; 6878 } 6879 6880 retval = 0; 6881 6882 out: 6883 return retval; 6884 } 6885 6886 static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len, 6887 char __user *optval, 6888 int __user *optlen) 6889 { 6890 struct sctp_assoc_value params; 6891 struct sctp_association *asoc; 6892 int retval = -EFAULT; 6893 6894 if (len < sizeof(params)) { 6895 retval = -EINVAL; 6896 goto out; 6897 } 6898 6899 len = sizeof(params); 6900 if (copy_from_user(¶ms, optval, len)) 6901 goto out; 6902 6903 asoc = sctp_id2assoc(sk, params.assoc_id); 6904 if (asoc) { 6905 params.assoc_value = asoc->reconf_enable; 6906 } else if (!params.assoc_id) { 6907 struct sctp_sock *sp = sctp_sk(sk); 6908 6909 params.assoc_value = sp->ep->reconf_enable; 6910 } else { 6911 
retval = -EINVAL; 6912 goto out; 6913 } 6914 6915 if (put_user(len, optlen)) 6916 goto out; 6917 6918 if (copy_to_user(optval, ¶ms, len)) 6919 goto out; 6920 6921 retval = 0; 6922 6923 out: 6924 return retval; 6925 } 6926 6927 static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, 6928 char __user *optval, 6929 int __user *optlen) 6930 { 6931 struct sctp_assoc_value params; 6932 struct sctp_association *asoc; 6933 int retval = -EFAULT; 6934 6935 if (len < sizeof(params)) { 6936 retval = -EINVAL; 6937 goto out; 6938 } 6939 6940 len = sizeof(params); 6941 if (copy_from_user(¶ms, optval, len)) 6942 goto out; 6943 6944 asoc = sctp_id2assoc(sk, params.assoc_id); 6945 if (asoc) { 6946 params.assoc_value = asoc->strreset_enable; 6947 } else if (!params.assoc_id) { 6948 struct sctp_sock *sp = sctp_sk(sk); 6949 6950 params.assoc_value = sp->ep->strreset_enable; 6951 } else { 6952 retval = -EINVAL; 6953 goto out; 6954 } 6955 6956 if (put_user(len, optlen)) 6957 goto out; 6958 6959 if (copy_to_user(optval, ¶ms, len)) 6960 goto out; 6961 6962 retval = 0; 6963 6964 out: 6965 return retval; 6966 } 6967 6968 static int sctp_getsockopt_scheduler(struct sock *sk, int len, 6969 char __user *optval, 6970 int __user *optlen) 6971 { 6972 struct sctp_assoc_value params; 6973 struct sctp_association *asoc; 6974 int retval = -EFAULT; 6975 6976 if (len < sizeof(params)) { 6977 retval = -EINVAL; 6978 goto out; 6979 } 6980 6981 len = sizeof(params); 6982 if (copy_from_user(¶ms, optval, len)) 6983 goto out; 6984 6985 asoc = sctp_id2assoc(sk, params.assoc_id); 6986 if (!asoc) { 6987 retval = -EINVAL; 6988 goto out; 6989 } 6990 6991 params.assoc_value = sctp_sched_get_sched(asoc); 6992 6993 if (put_user(len, optlen)) 6994 goto out; 6995 6996 if (copy_to_user(optval, ¶ms, len)) 6997 goto out; 6998 6999 retval = 0; 7000 7001 out: 7002 return retval; 7003 } 7004 7005 static int sctp_getsockopt_scheduler_value(struct sock *sk, int len, 7006 char __user *optval, 7007 int __user *optlen) 7008 { 7009 struct sctp_stream_value params; 7010 struct sctp_association *asoc; 7011 int retval = -EFAULT; 7012 7013 if (len < sizeof(params)) { 7014 retval = -EINVAL; 7015 goto out; 7016 } 7017 7018 len = sizeof(params); 7019 if (copy_from_user(¶ms, optval, len)) 7020 goto out; 7021 7022 asoc = sctp_id2assoc(sk, params.assoc_id); 7023 if (!asoc) { 7024 retval = -EINVAL; 7025 goto out; 7026 } 7027 7028 retval = sctp_sched_get_value(asoc, params.stream_id, 7029 ¶ms.stream_value); 7030 if (retval) 7031 goto out; 7032 7033 if (put_user(len, optlen)) { 7034 retval = -EFAULT; 7035 goto out; 7036 } 7037 7038 if (copy_to_user(optval, ¶ms, len)) { 7039 retval = -EFAULT; 7040 goto out; 7041 } 7042 7043 out: 7044 return retval; 7045 } 7046 7047 static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len, 7048 char __user *optval, 7049 int __user *optlen) 7050 { 7051 struct sctp_assoc_value params; 7052 struct sctp_association *asoc; 7053 int retval = -EFAULT; 7054 7055 if (len < sizeof(params)) { 7056 retval = -EINVAL; 7057 goto out; 7058 } 7059 7060 len = sizeof(params); 7061 if (copy_from_user(¶ms, optval, len)) 7062 goto out; 7063 7064 asoc = sctp_id2assoc(sk, params.assoc_id); 7065 if (asoc) { 7066 params.assoc_value = asoc->intl_enable; 7067 } else if (!params.assoc_id) { 7068 struct sctp_sock *sp = sctp_sk(sk); 7069 7070 params.assoc_value = sp->strm_interleave; 7071 } else { 7072 retval = -EINVAL; 7073 goto out; 7074 } 7075 7076 if (put_user(len, optlen)) 7077 goto out; 7078 7079 if (copy_to_user(optval, ¶ms, len)) 
7080 goto out; 7081 7082 retval = 0; 7083 7084 out: 7085 return retval; 7086 } 7087 7088 static int sctp_getsockopt(struct sock *sk, int level, int optname, 7089 char __user *optval, int __user *optlen) 7090 { 7091 int retval = 0; 7092 int len; 7093 7094 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); 7095 7096 /* I can hardly begin to describe how wrong this is. This is 7097 * so broken as to be worse than useless. The API draft 7098 * REALLY is NOT helpful here... I am not convinced that the 7099 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP 7100 * are at all well-founded. 7101 */ 7102 if (level != SOL_SCTP) { 7103 struct sctp_af *af = sctp_sk(sk)->pf->af; 7104 7105 retval = af->getsockopt(sk, level, optname, optval, optlen); 7106 return retval; 7107 } 7108 7109 if (get_user(len, optlen)) 7110 return -EFAULT; 7111 7112 if (len < 0) 7113 return -EINVAL; 7114 7115 lock_sock(sk); 7116 7117 switch (optname) { 7118 case SCTP_STATUS: 7119 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); 7120 break; 7121 case SCTP_DISABLE_FRAGMENTS: 7122 retval = sctp_getsockopt_disable_fragments(sk, len, optval, 7123 optlen); 7124 break; 7125 case SCTP_EVENTS: 7126 retval = sctp_getsockopt_events(sk, len, optval, optlen); 7127 break; 7128 case SCTP_AUTOCLOSE: 7129 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); 7130 break; 7131 case SCTP_SOCKOPT_PEELOFF: 7132 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); 7133 break; 7134 case SCTP_SOCKOPT_PEELOFF_FLAGS: 7135 retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen); 7136 break; 7137 case SCTP_PEER_ADDR_PARAMS: 7138 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 7139 optlen); 7140 break; 7141 case SCTP_DELAYED_SACK: 7142 retval = sctp_getsockopt_delayed_ack(sk, len, optval, 7143 optlen); 7144 break; 7145 case SCTP_INITMSG: 7146 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); 7147 break; 7148 case SCTP_GET_PEER_ADDRS: 7149 retval = sctp_getsockopt_peer_addrs(sk, len, optval, 7150 optlen); 7151 break; 7152 case SCTP_GET_LOCAL_ADDRS: 7153 retval = sctp_getsockopt_local_addrs(sk, len, optval, 7154 optlen); 7155 break; 7156 case SCTP_SOCKOPT_CONNECTX3: 7157 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); 7158 break; 7159 case SCTP_DEFAULT_SEND_PARAM: 7160 retval = sctp_getsockopt_default_send_param(sk, len, 7161 optval, optlen); 7162 break; 7163 case SCTP_DEFAULT_SNDINFO: 7164 retval = sctp_getsockopt_default_sndinfo(sk, len, 7165 optval, optlen); 7166 break; 7167 case SCTP_PRIMARY_ADDR: 7168 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); 7169 break; 7170 case SCTP_NODELAY: 7171 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); 7172 break; 7173 case SCTP_RTOINFO: 7174 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); 7175 break; 7176 case SCTP_ASSOCINFO: 7177 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); 7178 break; 7179 case SCTP_I_WANT_MAPPED_V4_ADDR: 7180 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); 7181 break; 7182 case SCTP_MAXSEG: 7183 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); 7184 break; 7185 case SCTP_GET_PEER_ADDR_INFO: 7186 retval = sctp_getsockopt_peer_addr_info(sk, len, optval, 7187 optlen); 7188 break; 7189 case SCTP_ADAPTATION_LAYER: 7190 retval = sctp_getsockopt_adaptation_layer(sk, len, optval, 7191 optlen); 7192 break; 7193 case SCTP_CONTEXT: 7194 retval = sctp_getsockopt_context(sk, len, optval, optlen); 7195 break; 7196 case SCTP_FRAGMENT_INTERLEAVE: 7197 
retval = sctp_getsockopt_fragment_interleave(sk, len, optval, 7198 optlen); 7199 break; 7200 case SCTP_PARTIAL_DELIVERY_POINT: 7201 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, 7202 optlen); 7203 break; 7204 case SCTP_MAX_BURST: 7205 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); 7206 break; 7207 case SCTP_AUTH_KEY: 7208 case SCTP_AUTH_CHUNK: 7209 case SCTP_AUTH_DELETE_KEY: 7210 retval = -EOPNOTSUPP; 7211 break; 7212 case SCTP_HMAC_IDENT: 7213 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); 7214 break; 7215 case SCTP_AUTH_ACTIVE_KEY: 7216 retval = sctp_getsockopt_active_key(sk, len, optval, optlen); 7217 break; 7218 case SCTP_PEER_AUTH_CHUNKS: 7219 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, 7220 optlen); 7221 break; 7222 case SCTP_LOCAL_AUTH_CHUNKS: 7223 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, 7224 optlen); 7225 break; 7226 case SCTP_GET_ASSOC_NUMBER: 7227 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 7228 break; 7229 case SCTP_GET_ASSOC_ID_LIST: 7230 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 7231 break; 7232 case SCTP_AUTO_ASCONF: 7233 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); 7234 break; 7235 case SCTP_PEER_ADDR_THLDS: 7236 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 7237 break; 7238 case SCTP_GET_ASSOC_STATS: 7239 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); 7240 break; 7241 case SCTP_RECVRCVINFO: 7242 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); 7243 break; 7244 case SCTP_RECVNXTINFO: 7245 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); 7246 break; 7247 case SCTP_PR_SUPPORTED: 7248 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen); 7249 break; 7250 case SCTP_DEFAULT_PRINFO: 7251 retval = sctp_getsockopt_default_prinfo(sk, len, optval, 7252 optlen); 7253 break; 7254 case SCTP_PR_ASSOC_STATUS: 7255 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, 7256 optlen); 7257 break; 7258 case SCTP_PR_STREAM_STATUS: 7259 retval = sctp_getsockopt_pr_streamstatus(sk, len, optval, 7260 optlen); 7261 break; 7262 case SCTP_RECONFIG_SUPPORTED: 7263 retval = sctp_getsockopt_reconfig_supported(sk, len, optval, 7264 optlen); 7265 break; 7266 case SCTP_ENABLE_STREAM_RESET: 7267 retval = sctp_getsockopt_enable_strreset(sk, len, optval, 7268 optlen); 7269 break; 7270 case SCTP_STREAM_SCHEDULER: 7271 retval = sctp_getsockopt_scheduler(sk, len, optval, 7272 optlen); 7273 break; 7274 case SCTP_STREAM_SCHEDULER_VALUE: 7275 retval = sctp_getsockopt_scheduler_value(sk, len, optval, 7276 optlen); 7277 break; 7278 case SCTP_INTERLEAVING_SUPPORTED: 7279 retval = sctp_getsockopt_interleaving_supported(sk, len, optval, 7280 optlen); 7281 break; 7282 default: 7283 retval = -ENOPROTOOPT; 7284 break; 7285 } 7286 7287 release_sock(sk); 7288 return retval; 7289 } 7290 7291 static int sctp_hash(struct sock *sk) 7292 { 7293 /* STUB */ 7294 return 0; 7295 } 7296 7297 static void sctp_unhash(struct sock *sk) 7298 { 7299 /* STUB */ 7300 } 7301 7302 /* Check if port is acceptable. Possibly find first available port. 7303 * 7304 * The port hash table (contained in the 'global' SCTP protocol storage 7305 * returned by struct sctp_protocol *sctp_get_protocol()). The hash 7306 * table is an array of 4096 lists (sctp_bind_hashbucket). 
Each 7307 * list (the list number is the port number hashed out, so as you 7308 * would expect from a hash function, all the ports in a given list have 7309 * such a number that hashes out to the same list number; you were 7310 * expecting that, right?); so each list has a set of ports, with a 7311 * link to the socket (struct sock) that uses it, the port number and 7312 * a fastreuse flag (FIXME: NPI ipg). 7313 */ 7314 static struct sctp_bind_bucket *sctp_bucket_create( 7315 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 7316 7317 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 7318 { 7319 struct sctp_bind_hashbucket *head; /* hash list */ 7320 struct sctp_bind_bucket *pp; 7321 unsigned short snum; 7322 int ret; 7323 7324 snum = ntohs(addr->v4.sin_port); 7325 7326 pr_debug("%s: begins, snum:%d\n", __func__, snum); 7327 7328 local_bh_disable(); 7329 7330 if (snum == 0) { 7331 /* Search for an available port. */ 7332 int low, high, remaining, index; 7333 unsigned int rover; 7334 struct net *net = sock_net(sk); 7335 7336 inet_get_local_port_range(net, &low, &high); 7337 remaining = (high - low) + 1; 7338 rover = prandom_u32() % remaining + low; 7339 7340 do { 7341 rover++; 7342 if ((rover < low) || (rover > high)) 7343 rover = low; 7344 if (inet_is_local_reserved_port(net, rover)) 7345 continue; 7346 index = sctp_phashfn(sock_net(sk), rover); 7347 head = &sctp_port_hashtable[index]; 7348 spin_lock(&head->lock); 7349 sctp_for_each_hentry(pp, &head->chain) 7350 if ((pp->port == rover) && 7351 net_eq(sock_net(sk), pp->net)) 7352 goto next; 7353 break; 7354 next: 7355 spin_unlock(&head->lock); 7356 } while (--remaining > 0); 7357 7358 /* Exhausted local port range during search? */ 7359 ret = 1; 7360 if (remaining <= 0) 7361 goto fail; 7362 7363 /* OK, here is the one we will use. HEAD (the port 7364 * hash table list entry) is non-NULL and we hold it's 7365 * mutex. 7366 */ 7367 snum = rover; 7368 } else { 7369 /* We are given an specific port number; we verify 7370 * that it is not being used. If it is used, we will 7371 * exahust the search in the hash list corresponding 7372 * to the port number (snum) - we detect that with the 7373 * port iterator, pp being NULL. 7374 */ 7375 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; 7376 spin_lock(&head->lock); 7377 sctp_for_each_hentry(pp, &head->chain) { 7378 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) 7379 goto pp_found; 7380 } 7381 } 7382 pp = NULL; 7383 goto pp_not_found; 7384 pp_found: 7385 if (!hlist_empty(&pp->owner)) { 7386 /* We had a port hash table hit - there is an 7387 * available port (pp != NULL) and it is being 7388 * used by other socket (pp->owner not empty); that other 7389 * socket is going to be sk2. 7390 */ 7391 int reuse = sk->sk_reuse; 7392 struct sock *sk2; 7393 7394 pr_debug("%s: found a possible match\n", __func__); 7395 7396 if (pp->fastreuse && sk->sk_reuse && 7397 sk->sk_state != SCTP_SS_LISTENING) 7398 goto success; 7399 7400 /* Run through the list of sockets bound to the port 7401 * (pp->port) [via the pointers bind_next and 7402 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, 7403 * we get the endpoint they describe and run through 7404 * the endpoint's list of IP (v4 or v6) addresses, 7405 * comparing each of the addresses with the address of 7406 * the socket sk. If we find a match, then that means 7407 * that this port/socket (sk) combination are already 7408 * in an endpoint. 
7409 */ 7410 sk_for_each_bound(sk2, &pp->owner) { 7411 struct sctp_endpoint *ep2; 7412 ep2 = sctp_sk(sk2)->ep; 7413 7414 if (sk == sk2 || 7415 (reuse && sk2->sk_reuse && 7416 sk2->sk_state != SCTP_SS_LISTENING)) 7417 continue; 7418 7419 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, 7420 sctp_sk(sk2), sctp_sk(sk))) { 7421 ret = (long)sk2; 7422 goto fail_unlock; 7423 } 7424 } 7425 7426 pr_debug("%s: found a match\n", __func__); 7427 } 7428 pp_not_found: 7429 /* If there was a hash table miss, create a new port. */ 7430 ret = 1; 7431 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) 7432 goto fail_unlock; 7433 7434 /* In either case (hit or miss), make sure fastreuse is 1 only 7435 * if sk->sk_reuse is too (that is, if the caller requested 7436 * SO_REUSEADDR on this socket -sk-). 7437 */ 7438 if (hlist_empty(&pp->owner)) { 7439 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) 7440 pp->fastreuse = 1; 7441 else 7442 pp->fastreuse = 0; 7443 } else if (pp->fastreuse && 7444 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) 7445 pp->fastreuse = 0; 7446 7447 /* We are set, so fill up all the data in the hash table 7448 * entry, tie the socket list information with the rest of the 7449 * sockets FIXME: Blurry, NPI (ipg). 7450 */ 7451 success: 7452 if (!sctp_sk(sk)->bind_hash) { 7453 inet_sk(sk)->inet_num = snum; 7454 sk_add_bind_node(sk, &pp->owner); 7455 sctp_sk(sk)->bind_hash = pp; 7456 } 7457 ret = 0; 7458 7459 fail_unlock: 7460 spin_unlock(&head->lock); 7461 7462 fail: 7463 local_bh_enable(); 7464 return ret; 7465 } 7466 7467 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral 7468 * port is requested. 7469 */ 7470 static int sctp_get_port(struct sock *sk, unsigned short snum) 7471 { 7472 union sctp_addr addr; 7473 struct sctp_af *af = sctp_sk(sk)->pf->af; 7474 7475 /* Set up a dummy address struct from the sk. */ 7476 af->from_sk(&addr, sk); 7477 addr.v4.sin_port = htons(snum); 7478 7479 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 7480 return !!sctp_get_port_local(sk, &addr); 7481 } 7482 7483 /* 7484 * Move a socket to LISTENING state. 7485 */ 7486 static int sctp_listen_start(struct sock *sk, int backlog) 7487 { 7488 struct sctp_sock *sp = sctp_sk(sk); 7489 struct sctp_endpoint *ep = sp->ep; 7490 struct crypto_shash *tfm = NULL; 7491 char alg[32]; 7492 7493 /* Allocate HMAC for generating cookie. */ 7494 if (!sp->hmac && sp->sctp_hmac_alg) { 7495 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); 7496 tfm = crypto_alloc_shash(alg, 0, 0); 7497 if (IS_ERR(tfm)) { 7498 net_info_ratelimited("failed to load transform for %s: %ld\n", 7499 sp->sctp_hmac_alg, PTR_ERR(tfm)); 7500 return -ENOSYS; 7501 } 7502 sctp_sk(sk)->hmac = tfm; 7503 } 7504 7505 /* 7506 * If a bind() or sctp_bindx() is not called prior to a listen() 7507 * call that allows new associations to be accepted, the system 7508 * picks an ephemeral port and will choose an address set equivalent 7509 * to binding with a wildcard address. 7510 * 7511 * This is not currently spelled out in the SCTP sockets 7512 * extensions draft, but follows the practice as seen in TCP 7513 * sockets. 
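 *
 * For illustration only, a userspace one-to-many server relying on this
 * behaviour could be as simple as the sketch below (error handling
 * omitted; SOCK_SEQPACKET selects the UDP-style API):
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *	listen(fd, 5);
 *
 * after which the endpoint is bound to an ephemeral port on the
 * wildcard address, as described above.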
7514 * 7515 */ 7516 inet_sk_set_state(sk, SCTP_SS_LISTENING); 7517 if (!ep->base.bind_addr.port) { 7518 if (sctp_autobind(sk)) 7519 return -EAGAIN; 7520 } else { 7521 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { 7522 inet_sk_set_state(sk, SCTP_SS_CLOSED); 7523 return -EADDRINUSE; 7524 } 7525 } 7526 7527 sk->sk_max_ack_backlog = backlog; 7528 sctp_hash_endpoint(ep); 7529 return 0; 7530 } 7531 7532 /* 7533 * 4.1.3 / 5.1.3 listen() 7534 * 7535 * By default, new associations are not accepted for UDP style sockets. 7536 * An application uses listen() to mark a socket as being able to 7537 * accept new associations. 7538 * 7539 * On TCP style sockets, applications use listen() to ready the SCTP 7540 * endpoint for accepting inbound associations. 7541 * 7542 * On both types of endpoints a backlog of '0' disables listening. 7543 * 7544 * Move a socket to LISTENING state. 7545 */ 7546 int sctp_inet_listen(struct socket *sock, int backlog) 7547 { 7548 struct sock *sk = sock->sk; 7549 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 7550 int err = -EINVAL; 7551 7552 if (unlikely(backlog < 0)) 7553 return err; 7554 7555 lock_sock(sk); 7556 7557 /* Peeled-off sockets are not allowed to listen(). */ 7558 if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) 7559 goto out; 7560 7561 if (sock->state != SS_UNCONNECTED) 7562 goto out; 7563 7564 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) 7565 goto out; 7566 7567 /* If backlog is zero, disable listening. */ 7568 if (!backlog) { 7569 if (sctp_sstate(sk, CLOSED)) 7570 goto out; 7571 7572 err = 0; 7573 sctp_unhash_endpoint(ep); 7574 sk->sk_state = SCTP_SS_CLOSED; 7575 if (sk->sk_reuse) 7576 sctp_sk(sk)->bind_hash->fastreuse = 1; 7577 goto out; 7578 } 7579 7580 /* If we are already listening, just update the backlog */ 7581 if (sctp_sstate(sk, LISTENING)) 7582 sk->sk_max_ack_backlog = backlog; 7583 else { 7584 err = sctp_listen_start(sk, backlog); 7585 if (err) 7586 goto out; 7587 } 7588 7589 err = 0; 7590 out: 7591 release_sock(sk); 7592 return err; 7593 } 7594 7595 /* 7596 * This function is done by modeling the current datagram_poll() and the 7597 * tcp_poll(). Note that, based on these implementations, we don't 7598 * lock the socket in this function, even though it seems that, 7599 * ideally, locking or some other mechanisms can be used to ensure 7600 * the integrity of the counters (sndbuf and wmem_alloc) used 7601 * in this place. We assume that we don't need locks either until proven 7602 * otherwise. 7603 * 7604 * Another thing to note is that we include the Async I/O support 7605 * here, again, by modeling the current TCP/UDP code. We don't have 7606 * a good way to test with it yet. 7607 */ 7608 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) 7609 { 7610 struct sock *sk = sock->sk; 7611 struct sctp_sock *sp = sctp_sk(sk); 7612 unsigned int mask; 7613 7614 poll_wait(file, sk_sleep(sk), wait); 7615 7616 sock_rps_record_flow(sk); 7617 7618 /* A TCP-style listening socket becomes readable when the accept queue 7619 * is not empty. 7620 */ 7621 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 7622 return (!list_empty(&sp->ep->asocs)) ? 7623 (POLLIN | POLLRDNORM) : 0; 7624 7625 mask = 0; 7626 7627 /* Is there any exceptional events? */ 7628 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 7629 mask |= POLLERR | 7630 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); 7631 if (sk->sk_shutdown & RCV_SHUTDOWN) 7632 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 7633 if (sk->sk_shutdown == SHUTDOWN_MASK) 7634 mask |= POLLHUP; 7635 7636 /* Is it readable? Reconsider this code with TCP-style support. */ 7637 if (!skb_queue_empty(&sk->sk_receive_queue)) 7638 mask |= POLLIN | POLLRDNORM; 7639 7640 /* The association is either gone or not ready. */ 7641 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) 7642 return mask; 7643 7644 /* Is it writable? */ 7645 if (sctp_writeable(sk)) { 7646 mask |= POLLOUT | POLLWRNORM; 7647 } else { 7648 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 7649 /* 7650 * Since the socket is not locked, the buffer 7651 * might be made available after the writeable check and 7652 * before the bit is set. This could cause a lost I/O 7653 * signal. tcp_poll() has a race breaker for this race 7654 * condition. Based on their implementation, we put 7655 * in the following code to cover it as well. 7656 */ 7657 if (sctp_writeable(sk)) 7658 mask |= POLLOUT | POLLWRNORM; 7659 } 7660 return mask; 7661 } 7662 7663 /******************************************************************** 7664 * 2nd Level Abstractions 7665 ********************************************************************/ 7666 7667 static struct sctp_bind_bucket *sctp_bucket_create( 7668 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) 7669 { 7670 struct sctp_bind_bucket *pp; 7671 7672 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); 7673 if (pp) { 7674 SCTP_DBG_OBJCNT_INC(bind_bucket); 7675 pp->port = snum; 7676 pp->fastreuse = 0; 7677 INIT_HLIST_HEAD(&pp->owner); 7678 pp->net = net; 7679 hlist_add_head(&pp->node, &head->chain); 7680 } 7681 return pp; 7682 } 7683 7684 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 7685 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 7686 { 7687 if (pp && hlist_empty(&pp->owner)) { 7688 __hlist_del(&pp->node); 7689 kmem_cache_free(sctp_bucket_cachep, pp); 7690 SCTP_DBG_OBJCNT_DEC(bind_bucket); 7691 } 7692 } 7693 7694 /* Release this socket's reference to a local port. */ 7695 static inline void __sctp_put_port(struct sock *sk) 7696 { 7697 struct sctp_bind_hashbucket *head = 7698 &sctp_port_hashtable[sctp_phashfn(sock_net(sk), 7699 inet_sk(sk)->inet_num)]; 7700 struct sctp_bind_bucket *pp; 7701 7702 spin_lock(&head->lock); 7703 pp = sctp_sk(sk)->bind_hash; 7704 __sk_del_bind_node(sk); 7705 sctp_sk(sk)->bind_hash = NULL; 7706 inet_sk(sk)->inet_num = 0; 7707 sctp_bucket_destroy(pp); 7708 spin_unlock(&head->lock); 7709 } 7710 7711 void sctp_put_port(struct sock *sk) 7712 { 7713 local_bh_disable(); 7714 __sctp_put_port(sk); 7715 local_bh_enable(); 7716 } 7717 7718 /* 7719 * The system picks an ephemeral port and choose an address set equivalent 7720 * to binding with a wildcard address. 7721 * One of those addresses will be the primary address for the association. 7722 * This automatically enables the multihoming capability of SCTP. 7723 */ 7724 static int sctp_autobind(struct sock *sk) 7725 { 7726 union sctp_addr autoaddr; 7727 struct sctp_af *af; 7728 __be16 port; 7729 7730 /* Initialize a local sockaddr structure to INADDR_ANY. */ 7731 af = sctp_sk(sk)->pf->af; 7732 7733 port = htons(inet_sk(sk)->inet_num); 7734 af->inaddr_any(&autoaddr, port); 7735 7736 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); 7737 } 7738 7739 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. 
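 *
 * For orientation, a userspace sender attaches one of these headers
 * roughly as follows (sketch only; fd and iov are assumed to be set up
 * elsewhere):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndrcvinfo *si;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *	si = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	memset(si, 0, sizeof(struct sctp_sndrcvinfo));
 *	si->sinfo_stream = 1;
 *	sendmsg(fd, &msg, 0);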
/* Parse out IPPROTO_SCTP CMSG headers.  Perform only minimal validation.
 *
 * From RFC 2292
 * 4.2 The cmsghdr Structure
 *
 * When ancillary data is sent or received, any number of ancillary data
 * objects can be specified by the msg_control and msg_controllen members of
 * the msghdr structure, because each object is preceded by
 * a cmsghdr structure defining the object's length (the cmsg_len member).
 * Historically Berkeley-derived implementations have passed only one object
 * at a time, but this API allows multiple objects to be
 * passed in a single call to sendmsg() or recvmsg().  The following example
 * shows two ancillary data objects in a control buffer.
 *
 *   |<--------------------------- msg_controllen -------------------------->|
 *   |                                                                       |
 *
 *   |<----- ancillary data object ----->|<----- ancillary data object ----->|
 *
 *   |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
 *   |                                   |                                   |
 *
 *   |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
 *
 *   |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
 *   |                                |  |                                |  |
 *
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *   |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
 *
 *   |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
 *
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *   ^
 *   |
 *
 *   msg_control
 *   points here
 */
static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
{
	struct msghdr *my_msg = (struct msghdr *)msg;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, my_msg) {
		if (!CMSG_OK(my_msg, cmsg))
			return -EINVAL;

		/* Should we parse this header or ignore it? */
		if (cmsg->cmsg_level != IPPROTO_SCTP)
			continue;

		/* Strictly check lengths following the example in SCM code. */
		switch (cmsg->cmsg_type) {
		case SCTP_INIT:
			/* SCTP Socket API Extension
			 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
			 *
			 * This cmsghdr structure provides information for
			 * initializing new SCTP associations with sendmsg().
			 * The SCTP_INITMSG socket option uses this same data
			 * structure.  This structure is not used for
			 * recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
				return -EINVAL;

			cmsgs->init = CMSG_DATA(cmsg);
			break;

		case SCTP_SNDRCV:
			/* SCTP Socket API Extension
			 * 5.3.2 SCTP Header Information Structure (SCTP_SNDRCV)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg() and describes SCTP header information
			 * about a received message through recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
				return -EINVAL;

			cmsgs->srinfo = CMSG_DATA(cmsg);

			if (cmsgs->srinfo->sinfo_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		case SCTP_SNDINFO:
			/* SCTP Socket API Extension
			 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg().  This structure and SCTP_RCVINFO replace
			 * SCTP_SNDRCV, which has been deprecated.
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ---------------------
			 * IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
				return -EINVAL;

			cmsgs->sinfo = CMSG_DATA(cmsg);

			if (cmsgs->sinfo->snd_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Wait for a packet.
 * Note: This function is the same function as in core/datagram.c
 * with a few modifications to make lksctp work.
 */
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto ready;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out;

	/* Sequenced packets can come disconnected.  If so we report the
	 * problem.
	 */
	error = -ENOTCONN;

	/* Is there a good reason to think that we may receive some data? */
	if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
		goto out;

	/* Handle signals. */
	if (signal_pending(current))
		goto interrupted;

	/* Let another process have a go, since we are going to sleep
	 * anyway.  Note: This may cause odd behaviors if the message
	 * does not fit in the user's buffer, but this seems to be the
	 * only way to honor MSG_DONTWAIT realistically.
	 */
	release_sock(sk);
	*timeo_p = schedule_timeout(*timeo_p);
	lock_sock(sk);

ready:
	finish_wait(sk_sleep(sk), &wait);
	return 0;

interrupted:
	error = sock_intr_errno(*timeo_p);

out:
	finish_wait(sk_sleep(sk), &wait);
	*err = error;
	return error;
}
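
/* From the user's point of view, the wait above is what backs MSG_DONTWAIT
 * and SO_RCVTIMEO on SCTP receives.  A hedged userspace sketch, not part of
 * this file; it assumes an established or one-to-many SCTP socket in fd:
 *
 *	#include <errno.h>
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *
 *	char buf[1024];
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *
 *	// Non-blocking attempt: returns -1/EAGAIN instead of sleeping.
 *	if (recv(fd, buf, sizeof(buf), MSG_DONTWAIT) < 0 && errno == EAGAIN)
 *		;	// nothing queued right now
 *
 *	// Bounded wait: sctp_wait_for_packet() sleeps for at most ~2s.
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *	if (recv(fd, buf, sizeof(buf), 0) < 0 && errno == EAGAIN)
 *		;	// timed out without data
 */
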
/* Receive a datagram.
 * Note: This is pretty much the same routine as in core/datagram.c
 * with a few changes to make lksctp work.
 */
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
				       int noblock, int *err)
{
	int error;
	struct sk_buff *skb;
	long timeo;

	timeo = sock_rcvtimeo(sk, noblock);

	pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
		 MAX_SCHEDULE_TIMEOUT);

	do {
		/* Again, only user level code calls this function, so
		 * nothing at interrupt level will suddenly eat the
		 * receive_queue.
		 *
		 * Look at the current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				refcount_inc(&skb->users);
		} else {
			skb = __skb_dequeue(&sk->sk_receive_queue);
		}

		if (skb)
			return skb;

		/* Caller is allowed not to check sk->sk_err before calling. */
		error = sock_error(sk);
		if (error)
			goto no_packet;

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;

		if (sk_can_busy_loop(sk)) {
			sk_busy_loop(sk, noblock);

			if (!skb_queue_empty(&sk->sk_receive_queue))
				continue;
		}

		/* User doesn't want to wait. */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;
	} while (sctp_wait_for_packet(sk, err, &timeo) == 0);

	return NULL;

no_packet:
	*err = error;
	return NULL;
}

/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	if (sctp_wspace(asoc) <= 0)
		return;

	if (waitqueue_active(&asoc->wait))
		wake_up_interruptible(&asoc->wait);

	if (sctp_writeable(sk)) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (wq) {
			if (waitqueue_active(&wq->wait))
				wake_up_interruptible(&wq->wait);

			/* Note that we try to include the Async I/O support
			 * here by modeling from the current TCP/UDP code.
			 * We have not tested with it yet.
			 */
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		}
		rcu_read_unlock();
	}
}

static void sctp_wake_up_waiters(struct sock *sk,
				 struct sctp_association *asoc)
{
	struct sctp_association *tmp = asoc;

	/* We do accounting for the sndbuf space per association,
	 * so we only need to wake our own association.
	 */
	if (asoc->ep->sndbuf_policy)
		return __sctp_write_space(asoc);

	/* If the association is going down and is just flushing its
	 * outq, then just notify the others normally.
	 */
	if (asoc->base.dead)
		return sctp_write_space(sk);

	/* Accounting for the sndbuf space is per socket, so we need to
	 * wake up the others; try to be fair and, if there are other
	 * associations, let them have a go first instead of just doing
	 * a sctp_write_space() call.
	 *
	 * Note that we reach sctp_wake_up_waiters() only when
	 * associations free up queued chunks, thus we are under
	 * lock and the list of associations on a socket is
	 * guaranteed not to change.
	 */
	for (tmp = list_next_entry(tmp, asocs); 1;
	     tmp = list_next_entry(tmp, asocs)) {
		/* Manually skip the head element. */
		if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
			continue;
		/* Wake up association. */
		__sctp_write_space(tmp);
		/* We've reached the end. */
		if (tmp == asoc)
			break;
	}
}
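
/* The wakeups above are what a non-blocking writer ends up waiting on.  The
 * usual userspace pattern, sketched here for illustration only under the
 * assumption that fd is a non-blocking one-to-one socket and buf/len
 * describe the pending message (error handling omitted):
 *
 *	#include <errno.h>
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	ssize_t n = send(fd, buf, len, MSG_DONTWAIT);
 *	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *		// Sleep until sctp_wfree()/__sctp_write_space() frees room
 *		// and sctp_poll() reports the socket writable again.
 *		poll(&pfd, 1, -1);
 *		n = send(fd, buf, len, MSG_DONTWAIT);
 *	}
 */
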
/* Do accounting for the sndbuf space.
 * Decrement the used sndbuf space of the corresponding association by the
 * data size which was just transmitted (freed).
 */
static void sctp_wfree(struct sk_buff *skb)
{
	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));

	/* This undoes what is done via sctp_set_owner_w and sk_mem_charge. */
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);

	sock_wfree(skb);
	sctp_wake_up_waiters(sk, asoc);

	sctp_association_put(asoc);
}

/* Do accounting for the receive space on the socket.
 * Accounting for the association is done in ulpevent.c.
 * We set this as a destructor for the cloned data skbs so that
 * accounting is done at the correct time.
 */
void sctp_sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sctp_ulpevent *event = sctp_skb2event(skb);

	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);

	/* Mimic the behavior of sock_rfree. */
	sk_mem_uncharge(sk, event->rmem_len);
}


/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len, struct sock **orig_sk)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
		 *timeo_p, msg_len);

	/* Increment the association's refcnt. */
	sctp_association_hold(asoc);

	/* Wait on the association specific sndbuf space. */
	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (asoc->base.dead)
			goto do_dead;
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;
		if (msg_len <= sctp_wspace(asoc))
			break;

		/* Let another process have a go, since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		if (sk != asoc->base.sk) {
			release_sock(sk);
			sk = asoc->base.sk;
			lock_sock(sk);
		}

		*timeo_p = current_timeo;
	}

out:
	*orig_sk = sk;
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt. */
	sctp_association_put(asoc);

	return err;

do_dead:
	err = -ESRCH;
	goto out;

do_error:
	err = -EPIPE;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EAGAIN;
	goto out;
}

void sctp_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
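
/* A blocking sender ends up in sctp_wait_for_sndbuf() above; SO_SNDTIMEO
 * bounds that sleep and MSG_DONTWAIT skips it entirely.  Illustrative
 * userspace sketch, not part of this file; fd, buf and len are placeholders
 * and error handling is omitted:
 *
 *	#include <errno.h>
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
 *	if (send(fd, buf, len, 0) < 0 && errno == EAGAIN)
 *		;	// sndbuf stayed full for ~5s; message not queued
 */
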
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
	struct sctp_association *asoc;

	/* Wake up the tasks in each wait queue. */
	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
		__sctp_write_space(asoc);
	}
}

/* Is there any sndbuf space available on the socket?
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket.  For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 *  - Daisy
 */
static int sctp_writeable(struct sock *sk)
{
	int amt = 0;

	amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
	if (amt < 0)
		amt = 0;
	return amt;
}
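
/* sctp_wait_for_connect() below is also what gives a non-blocking connect()
 * its EINPROGRESS behaviour.  A common userspace sequence, sketched here for
 * illustration only; the peer address is a placeholder and error handling is
 * omitted:
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		int soerr = 0;
 *		socklen_t slen = sizeof(soerr);
 *
 *		poll(&pfd, 1, -1);	// woken once the association settles
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &soerr, &slen);
 *		// soerr == 0 means the association reached ESTABLISHED
 *	}
 */
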
/* Wait for an association to go into ESTABLISHED state.  If the timeout is
 * 0, return immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

	/* Increment the association's refcnt. */
	sctp_association_hold(asoc);

	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		if (sctp_state(asoc, ESTABLISHED))
			break;

		/* Let another process have a go, since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt. */
	sctp_association_put(asoc);

	return err;

do_error:
	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
		err = -ETIMEDOUT;
	else
		err = -ECONNREFUSED;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EINPROGRESS;
	goto out;
}

static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
	struct sctp_endpoint *ep;
	int err = 0;
	DEFINE_WAIT(wait);

	ep = sctp_sk(sk)->ep;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);

		if (list_empty(&ep->asocs)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}

		err = -EINVAL;
		if (!sctp_sstate(sk, LISTENING))
			break;

		err = 0;
		if (!list_empty(&ep->asocs))
			break;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = -EAGAIN;
		if (!timeo)
			break;
	}

	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk_sleep(sk), &wait);
}

static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag)
		sctp_skb_set_owner_r_frag(frag, sk);

done:
	sctp_skb_set_owner_r(skb, sk);
}
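
/* The accept() path that uses sctp_wait_for_accept() above and the copy and
 * migrate helpers below looks like this from userspace.  Illustrative sketch
 * only; lfd is assumed to be a bound one-to-one style SCTP socket and error
 * handling is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	struct sockaddr_in peer;
 *	socklen_t plen = sizeof(peer);
 *	int afd;
 *
 *	listen(lfd, 5);
 *	// Blocks in sctp_wait_for_accept() until an association is queued,
 *	// then the association is migrated onto the returned socket.
 *	afd = accept(lfd, (struct sockaddr *)&peer, &plen);
 */
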
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
		    struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;

	newsk->sk_type = sk->sk_type;
	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	newsk->sk_flags = sk->sk_flags;
	newsk->sk_tsflags = sk->sk_tsflags;
	newsk->sk_no_check_tx = sk->sk_no_check_tx;
	newsk->sk_no_check_rx = sk->sk_no_check_rx;
	newsk->sk_reuse = sk->sk_reuse;

	newsk->sk_shutdown = sk->sk_shutdown;
	newsk->sk_destruct = sctp_destruct_sock;
	newsk->sk_family = sk->sk_family;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	newsk->sk_sndbuf = sk->sk_sndbuf;
	newsk->sk_rcvbuf = sk->sk_rcvbuf;
	newsk->sk_lingertime = sk->sk_lingertime;
	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
	newsk->sk_sndtimeo = sk->sk_sndtimeo;
	newsk->sk_rxhash = sk->sk_rxhash;

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername().
	 */
	newinet->inet_sport = inet->inet_sport;
	newinet->inet_saddr = inet->inet_saddr;
	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
	newinet->inet_dport = htons(asoc->peer.port);
	newinet->pmtudisc = inet->pmtudisc;
	newinet->inet_id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = inet->uc_ttl;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
		net_enable_timestamp();

	security_sk_clone(sk, newsk);
}

static inline void sctp_copy_descendant(struct sock *sk_to,
					const struct sock *sk_from)
{
	int ancestor_size = sizeof(struct inet_sock) +
			    sizeof(struct sctp_sock) -
			    offsetof(struct sctp_sock, auto_asconf_list);

	if (sk_from->sk_family == PF_INET6)
		ancestor_size += sizeof(struct ipv6_pinfo);

	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}

/* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type)
{
	struct sctp_sock *oldsp = sctp_sk(oldsk);
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sctp_bind_bucket *pp; /* hash list port iterator */
	struct sctp_endpoint *newep = newsp->ep;
	struct sk_buff *skb, *tmp;
	struct sctp_ulpevent *event;
	struct sctp_bind_hashbucket *head;

	/* Migrate socket buffer sizes and all the socket level options to the
	 * new socket.
	 */
	newsk->sk_sndbuf = oldsk->sk_sndbuf;
	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
	/* Brute force copy old sctp opt. */
	sctp_copy_descendant(newsk, oldsk);

	/* Restore the ep value that was overwritten with the above structure
	 * copy.
	 */
	newsp->ep = newep;
	newsp->hmac = NULL;

	/* Hook this new socket in to the bind_hash list. */
	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
						 inet_sk(oldsk)->inet_num)];
	spin_lock_bh(&head->lock);
	pp = sctp_sk(oldsk)->bind_hash;
	sk_add_bind_node(newsk, &pp->owner);
	sctp_sk(newsk)->bind_hash = pp;
	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
	spin_unlock_bh(&head->lock);

	/* Copy the bind_addr list from the original endpoint to the new
	 * endpoint so that we can handle restarts properly.
	 */
	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
			   &oldsp->ep->base.bind_addr, GFP_KERNEL);

	/* Move any messages in the old socket's receive queue that are for the
	 * peeled off association to the new socket's receive queue.
	 */
	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
			__skb_unlink(skb, &oldsk->sk_receive_queue);
			__skb_queue_tail(&newsk->sk_receive_queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
		}
	}

	/* Clean up any messages pending delivery due to partial
	 * delivery.  Three cases:
	 * 1) No partial delivery; no work.
	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
	 */
	skb_queue_head_init(&newsp->pd_lobby);
	atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);

	if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
		struct sk_buff_head *queue;

		/* Decide which queue to move pd_lobby skbs to. */
		if (assoc->ulpq.pd_mode)
			queue = &newsp->pd_lobby;
		else
			queue = &newsk->sk_receive_queue;

		/* Walk through the pd_lobby, looking for skbs that
		 * need to be moved to the new socket.
		 */
		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
			event = sctp_skb2event(skb);
			if (event->asoc == assoc) {
				__skb_unlink(skb, &oldsp->pd_lobby);
				__skb_queue_tail(queue, skb);
				sctp_skb_set_owner_r_frag(skb, newsk);
			}
		}

		/* Clear up any skbs waiting for the partial
		 * delivery to finish.
		 */
		if (assoc->ulpq.pd_mode)
			sctp_clear_pd(oldsk, NULL);

	}

	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);

	/* Set the type of socket to indicate that it is peeled off from the
	 * original UDP-style socket or created with the accept() call on a
	 * TCP-style socket.
	 */
	newsp->type = type;

	/* Mark the new socket "in-use" by the user so that any packets
	 * that may arrive on the association after we've moved it are
	 * queued to the backlog.  This prevents a potential race between
	 * backlog processing on the old socket and new-packet processing
	 * on the new socket.
	 *
	 * The caller has just allocated newsk so we can guarantee that other
	 * paths won't try to lock it and then oldsk.
	 */
	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
	sctp_assoc_migrate(assoc, newsk);
	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);

	/* If the association on the newsk is already closed before accept()
	 * is called, set RCV_SHUTDOWN flag.
	 */
	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
		newsk->sk_shutdown |= RCV_SHUTDOWN;
	} else {
		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
	}

	release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */
struct proto sctp_prot = {
	.name			= "SCTP",
	.owner			= THIS_MODULE,
	.close			= sctp_close,
	.connect		= sctp_connect,
	.disconnect		= sctp_disconnect,
	.accept			= sctp_accept,
	.ioctl			= sctp_ioctl,
	.init			= sctp_init_sock,
	.destroy		= sctp_destroy_sock,
	.shutdown		= sctp_shutdown,
	.setsockopt		= sctp_setsockopt,
	.getsockopt		= sctp_getsockopt,
	.sendmsg		= sctp_sendmsg,
	.recvmsg		= sctp_recvmsg,
	.bind			= sctp_bind,
	.backlog_rcv		= sctp_backlog_rcv,
	.hash			= sctp_hash,
	.unhash			= sctp_unhash,
	.get_port		= sctp_get_port,
	.obj_size		= sizeof(struct sctp_sock),
	.sysctl_mem		= sysctl_sctp_mem,
	.sysctl_rmem		= sysctl_sctp_rmem,
	.sysctl_wmem		= sysctl_sctp_wmem,
	.memory_pressure	= &sctp_memory_pressure,
	.enter_memory_pressure	= sctp_enter_memory_pressure,
	.memory_allocated	= &sctp_memory_allocated,
	.sockets_allocated	= &sctp_sockets_allocated,
};

#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
	sctp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

struct proto sctpv6_prot = {
	.name			= "SCTPv6",
	.owner			= THIS_MODULE,
	.close			= sctp_close,
	.connect		= sctp_connect,
	.disconnect		= sctp_disconnect,
	.accept			= sctp_accept,
	.ioctl			= sctp_ioctl,
	.init			= sctp_init_sock,
	.destroy		= sctp_v6_destroy_sock,
	.shutdown		= sctp_shutdown,
	.setsockopt		= sctp_setsockopt,
	.getsockopt		= sctp_getsockopt,
	.sendmsg		= sctp_sendmsg,
	.recvmsg		= sctp_recvmsg,
	.bind			= sctp_bind,
	.backlog_rcv		= sctp_backlog_rcv,
	.hash			= sctp_hash,
	.unhash			= sctp_unhash,
	.get_port		= sctp_get_port,
	.obj_size		= sizeof(struct sctp6_sock),
	.sysctl_mem		= sysctl_sctp_mem,
	.sysctl_rmem		= sysctl_sctp_rmem,
	.sysctl_wmem		= sysctl_sctp_wmem,
	.memory_pressure	= &sctp_memory_pressure,
	.enter_memory_pressure	= sctp_enter_memory_pressure,
	.memory_allocated	= &sctp_memory_allocated,
	.sockets_allocated	= &sctp_sockets_allocated,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
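
/* Neither proto struct is used directly by applications; they are registered
 * with the inet core during protocol initialization, which lives in the SCTP
 * protocol init code rather than in this file.  A rough, illustrative sketch
 * of that wiring; the protosw variable name here is invented for the example:
 *
 *	static struct inet_protosw sctp_stream_protosw_example = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_SCTP,
 *		.prot     = &sctp_prot,
 *		.ops      = &inet_seqpacket_ops,
 *	};
 *
 *	proto_register(&sctp_prot, 1);
 *	inet_register_protosw(&sctp_stream_protosw_example);
 */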