// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);
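
/*
 * Illustrative sketch (not an API in this file): a protocol-level
 * setsockopt handler would typically gate a privileged option on the
 * two-part test above, i.e. the opener held the capability when the
 * socket was created *and* the caller holds it now:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 */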

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"     ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_XDP"      , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
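
/*
 * For example, with the _sock_locks() expansion above, an AF_INET
 * socket created from user space gets the lockdep class names
 * "sk_lock-AF_INET" and "slock-AF_INET", while a kernel socket of the
 * same family gets "k-sk_lock-AF_INET" and "k-slock-AF_INET" (see
 * sock_lock_init() further down).
 */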

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int size;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		size = sizeof(old_tv);
	} else {
		*(struct __kernel_sock_timeval *)optval = tv;
		size = sizeof(tv);
	}

	return size;
}
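
/*
 * Worked example of the conversion above, assuming HZ == 1000: a
 * timeout of 2500 jiffies reads back as tv_sec = 2500 / 1000 = 2 and
 * tv_usec = ((2500 % 1000) * USEC_PER_SEC) / 1000 = 500000, i.e. 2.5s.
 * sock_set_timeout() below performs the inverse, rounding the
 * microsecond part up to whole ticks with DIV_ROUND_UP(), so the same
 * 2.5s request becomes 2 * 1000 + 500 = 2500 jiffies again.
 */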

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_user(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv.tv_sec = old_tv.tv_sec;
		tv.tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_user(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);
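
/*
 * Hypothetical caller sketch: a protocol that looks up a socket in
 * softirq context hands the skb over roughly like this, with trim_cap
 * set to the minimum header length the socket filter must preserve
 * (example_lookup() and struct examplehdr are assumed names, not
 * defined here):
 *
 *	sk = example_lookup(net, skb);
 *	if (sk)
 *		return __sk_receive_skb(sk, skb, 1,
 *					sizeof(struct examplehdr), true);
 *	kfree_skb(skb);
 *	return 0;
 */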

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}
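
/*
 * From user space the same binding is requested via setsockopt(); a
 * small illustrative fragment (error handling elided):
 *
 *	static const char ifname[] = "eth0";
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, sizeof(ifname));
 *
 * or, avoiding the name lookup entirely, via SO_BINDTOIFINDEX with an
 * int ifindex (see sock_setsockopt() below), which lands in
 * sock_setbindtodevice_locked() above.
 */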

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	ret = sock_setbindtodevice_locked(sk, index);
	release_sock(sk);

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;
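
	/* Illustrative, from user space (request assumed to be under
	 * sysctl_rmem_max, error handling elided):
	 *
	 *	int val = 65536;
	 *	socklen_t len = sizeof(val);
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
	 *
	 * val now reads back as 131072: the doubled value stored above,
	 * covering payload plus struct sk_buff bookkeeping overhead.
	 */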
	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_rcvbuf;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		if (valbool) {
			if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
				sock_set_flag(sk, SOCK_TSTAMP_NEW);
			else
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			sock_reset_flag(sk, SOCK_TSTAMP_NEW);
		}
		break;

	case SO_TIMESTAMPING_NEW:
		sock_set_flag(sk, SOCK_TSTAMP_NEW);
		/* fall through */
	case SO_TIMESTAMPING_OLD:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else {
			if (optname == SO_TIMESTAMPING_NEW)
				sock_reset_flag(sk, SOCK_TSTAMP_NEW);

			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			sk->sk_rcvlowat = val ? : 1;
		break;
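
	/* For example, under the SO_RCVLOWAT rules above a request of 0
	 * is stored as 1 (wake the reader as soon as any data arrives)
	 * and a negative request is clamped to INT_MAX:
	 *
	 *	int lowat = 0;
	 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
	 */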
	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (val != sk->sk_mark) {
			sk->sk_mark = val;
			sk_dst_reset(sk);
		}
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif
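
	/* Sketch of the 64-bit handling below: on LP64, user space may
	 * pass a full unsigned long to exceed the 32-bit range:
	 *
	 *	unsigned long rate = 5000000000UL;	// 5 GB/s
	 *	setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
	 *		   &rate, sizeof(rate));
	 *
	 * while a 32-bit ~0U argument keeps its historical "unlimited"
	 * meaning and is widened to ~0UL.
	 */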
	case SO_MAX_PACING_RATE:
		{
		unsigned long ulval = (val == ~0U) ? ~0UL : val;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    get_user(ulval, (unsigned long __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = ulval;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
		break;
		}
	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
		} else if (copy_from_user(&sk_txtime, optval,
			   sizeof(struct sock_txtime))) {
			ret = -EFAULT;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
		} else {
			sock_valbool_flag(sk, SOCK_TXTIME, true);
			sk->sk_clockid = sk_txtime.clockid;
			sk->sk_txtime_deadline_mode =
				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
			sk->sk_txtime_report_errors =
				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		}
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_setbindtodevice_locked(sk, val);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
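
/*
 * Taken together, an illustrative user-space round trip through the two
 * entry points in this file (sock_setsockopt() above, sock_getsockopt()
 * below; error handling elided):
 *
 *	int on = 1;
 *	socklen_t len = sizeof(on);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 *	getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, &len);
 *	// on == 1, len == sizeof(int)
 */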

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}
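
	/* The peer-credential options above are typically consumed like
	 * this from user space (illustrative, AF_UNIX stream socket
	 * assumed, error handling elided):
	 *
	 *	struct ucred peer;
	 *	socklen_t len = sizeof(peer);
	 *
	 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len);
	 *
	 * peer.pid/uid/gid arrive translated into the caller's pid and
	 * user namespaces by cred_to_ucred() above.
	 */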

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
			lv = sizeof(v.ulval);
			v.ulval = sk->sk_max_pacing_rate;
		} else {
			/* 32bit version */
			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
		}
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	case SO_BINDTOIFINDEX:
		v.val = sk->sk_bound_dev_if;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc. to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
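
/*
 * One further illustrative consumer of sock_getsockopt(): SO_MEMINFO
 * fills an array of u32 counters (see sk_get_meminfo()), which user
 * space reads roughly as follows (error handling elided):
 *
 *	__u32 meminfo[SK_MEMINFO_VARS];
 *	socklen_t len = sizeof(meminfo);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_MEMINFO, meminfo, &len);
 *	// meminfo[SK_MEMINFO_RMEM_ALLOC] etc. now hold a snapshot
 */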

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
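
/*
 * Hypothetical sketch of a protocol ->create() handler obtaining its
 * socket through sk_alloc() below; PF_EXAMPLE and example_proto are
 * illustrative names only, and sock_init_data() is defined later in
 * this file:
 *
 *	static int example_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL,
 *			      &example_proto, kern);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */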

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

#ifdef CONFIG_BPF_SYSCALL
	bpf_sk_storage_free(sk);
#endif

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
			af_rlock_keys + sk->sk_family,
			af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
			af_wlock_keys + sk->sk_family,
			af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
			af_elock_keys + sk->sk_family,
			af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		newsk->sk_prot_creator = sk->sk_prot;

		/* SANITY */
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		refcount_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		sk_init_common(newsk);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_dst_pending_confirm = 0;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		atomic_set(&newsk->sk_drops, 0);
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		atomic_set(&newsk->sk_zckey, 0);

		sock_reset_flag(newsk, SOCK_DONE);
		mem_cgroup_sk_alloc(newsk);
		cgroup_sk_alloc(&newsk->sk_cgrp_data);

		rcu_read_lock();
		filter = rcu_dereference(sk->sk_filter);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);
		RCU_INIT_POINTER(newsk->sk_filter, filter);
		rcu_read_unlock();

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
		}
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
#ifdef CONFIG_BPF_SYSCALL
		RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
#endif

		newsk->sk_err	   = 0;
		newsk->sk_err_soft = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		if (likely(newsk->sk_net_refcnt))
			sock_inuse_add(sock_net(newsk), 1);

		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		refcount_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		RCU_INIT_POINTER(newsk->sk_wq, NULL);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (sock_needs_netstamp(sk) &&
		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_free_unlock_clone(struct sock *sk)
{
	/* It is still raw copy of parent, so invalidate
	 * destructor and make plain sk_free() */
	sk->sk_destruct = NULL;
	bh_unlock_sock(sk);
	sk_free(sk);
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	u32 max_segs = 1;

	sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
		}
	}
	sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
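
/*
 * Lifecycle note for the write-side accounting: skb_set_owner_w()
 * (below) adds skb->truesize to sk_wmem_alloc when a buffer is charged
 * to the socket, and sock_wfree() above gives it back on free.  The
 * extra unit that sk_alloc() stored in sk_wmem_alloc is what keeps
 * __sk_free() from running while any such packet is still in flight;
 * sk_free() drops it.
 */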

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
		__sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
#ifdef CONFIG_INET
	if (unlikely(!sk_fullsock(sk))) {
		skb->destructor = sock_edemux;
		sock_hold(sk);
		return;
	}
#endif
	skb->destructor = sock_wfree;
	skb_set_hash_from_sk(skb, sk);
	/*
	 * We used to take a refcount on sk, but following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed
	 */
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);

/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example).
 */
void skb_orphan_partial(struct sk_buff *skb)
{
	if (skb_is_tcp_pure_ack(skb))
		return;

	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		struct sock *sk = skb->sk;

		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
			skb->destructor = sock_efree;
		}
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate an skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

static void sock_ofree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
}

struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority)
{
	struct sk_buff *skb;

	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
	    sysctl_optmem_max)
		return NULL;

	skb = alloc_skb(size, priority);
	if (!skb)
		return NULL;

	atomic_add(skb->truesize, &sk->sk_omem_alloc);
	skb->sk = sk;
	skb->destructor = sock_ofree;
	return skb;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);
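
/*
 * Illustrative pairing for the option-memory helpers above, as a
 * protocol might use them (the size passed to sock_kfree_s() must match
 * the allocation so sk_omem_alloc balances):
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);	// or sock_kzfree_s() for secrets
 */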
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc)
{
	u32 tsflags;

	switch (cmsg->cmsg_type) {
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;
		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
		break;
	case SO_TIMESTAMPING_OLD:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;

		tsflags = *(u32 *)CMSG_DATA(cmsg);
		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
			return -EINVAL;

		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
		sockc->tsflags |= tsflags;
		break;
	case SCM_TXTIME:
		if (!sock_flag(sk, SOCK_TXTIME))
			return -EINVAL;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
			return -EINVAL;
		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
		break;
	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
	case SCM_RIGHTS:
	case SCM_CREDENTIALS:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(__sock_cmsg_send);

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc)
{
	struct cmsghdr *cmsg;
	int ret;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(sock_cmsg_send);

static void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	sk->sk_prot->enter_memory_pressure(sk);
}

static void sk_leave_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->leave_memory_pressure) {
		sk->sk_prot->leave_memory_pressure(sk);
	} else {
		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;

		if (memory_pressure && *memory_pressure)
			*memory_pressure = 0;
	}
}

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)
DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (page_ref_count(pfrag->page) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER &&
	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
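/*
 * Illustrative usage sketch (not part of this file): the typical
 * sendmsg() shape around the page_frag helpers above, copying user
 * data into the per-socket fragment cache.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 *				       pfrag->page, pfrag->offset, copy);
 *	if (!err)
 *		pfrag->offset += copy;
 *
 * On refill failure the socket has already been flagged for memory
 * pressure and had its send buffer moderated, so the caller only has
 * to wait or fail.
 */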
static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb, *next;

	while ((skb = sk->sk_backlog.head) != NULL) {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

		spin_unlock_bh(&sk->sk_lock.slock);

		do {
			next = skb->next;
			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb_mark_not_on_list(skb);
			sk_backlog_rcv(sk, skb);

			cond_resched();

			skb = next;
		} while (skb != NULL);

		spin_lock_bh(&sk->sk_lock.slock);
	}

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

void __sk_flush_backlog(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	__release_sock(sk);
	spin_unlock_bh(&sk->sk_lock.slock);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 * @skb: last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, timeo,
			   skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
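/*
 * Illustrative usage sketch (not part of this file): the canonical
 * receive-loop shape around sk_wait_data(), with the socket lock held.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 *
 * Passing NULL (or the last skb seen) as @skb makes sk_wait_data()
 * sleep until the tail of sk_receive_queue changes or @timeo elapses.
 */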
/**
 * __sk_mem_raise_allocated - increase memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @amt: pages to allocate
 * @kind: allocation type
 *
 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
	struct proto *prot = sk->sk_prot;
	long allocated = sk_memory_allocated_add(sk, amt);
	bool charged = true;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
		goto suppress_allocation;

	/* Under limit. */
	if (allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. */
	if (allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > sk_prot_mem_limits(sk, 2))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
			return 1;

	} else { /* SK_MEM_SEND */
		int wmem0 = sk_get_wmem0(sk, prot);

		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < wmem0)
				return 1;
		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
			return 1;
		}
	}

	if (sk_has_memory_pressure(sk)) {
		u64 alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);

	sk_memory_allocated_sub(sk, amt);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_raise_allocated);
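/*
 * Illustrative usage sketch (not part of this file): a receive path
 * normally reaches the function above through the sk_rmem_schedule()
 * wrapper before queueing an skb.
 *
 *	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize > sk->sk_rcvbuf ||
 *	    !sk_rmem_schedule(sk, skb, skb->truesize))
 *		goto drop;
 *	skb_set_owner_r(skb, sk);
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 *
 * sk_rmem_schedule() falls back to __sk_mem_schedule(), and hence to
 * __sk_mem_raise_allocated(), only when sk_forward_alloc cannot cover
 * the charge.
 */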
/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	int ret, amt = sk_mem_pages(size);

	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
	if (!ret)
		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
	return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 * __sk_mem_reduce_allocated - reclaim memory_allocated
 * @sk: socket
 * @amount: number of quanta
 *
 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc.
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
	sk_memory_allocated_sub(sk, amount);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reduce_allocated);

/**
 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 * @sk: socket
 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
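/*
 * Illustrative usage sketch (not part of this file): the charge/release
 * cycle implemented above, as driven through the inline wrappers in
 * net/sock.h.
 *
 *	if (!sk_wmem_schedule(sk, size))
 *		return -ENOBUFS;
 *	...queue the data, transmit, and eventually free it...
 *	sk_mem_reclaim(sk);
 *
 * Accounting is done in SK_MEM_QUANTUM units, so small allocations are
 * usually absorbed by sk_forward_alloc without touching the protocol's
 * global memory_allocated counter.
 */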
int sk_set_peek_off(struct sock *sk, int val)
{
	sk->sk_peek_off = val;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
		   bool kern)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg_locked);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage_locked);
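/*
 * Illustrative usage sketch (not part of this file): a minimal
 * proto_ops table for a hypothetical family that wires in the stubs
 * above for everything it does not implement; PF_MYFAMILY, my_release
 * and my_bind are made-up names.
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_MYFAMILY,
 *		.owner		= THIS_MODULE,
 *		.release	= my_release,
 *		.bind		= my_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */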
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
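/*
 * Illustrative usage sketch (not part of this file): the reference-
 * counted timer pattern sk_reset_timer()/sk_stop_timer() support;
 * my_timer_handler() is a made-up name.
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *
 *	static void my_timer_handler(struct timer_list *t)
 *	{
 *		struct sock *sk = from_timer(sk, t, sk_timer);
 *
 *		...
 *		sock_put(sk);
 *	}
 *
 * sk_reset_timer() takes a reference only when it arms a previously
 * inactive timer, and sk_stop_timer() drops one only when it actually
 * deleted a pending timer, so sk_refcnt stays balanced either way.
 */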
void sock_init_data(struct socket *sock, struct sock *sk)
{
	sk_init_common(sk);
	sk->sk_send_head = NULL;

	timer_setup(&sk->sk_timer, NULL, 0);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		RCU_INIT_POINTER(sk->sk_wq, sock->wq);
		sock->sk = sk;
		sk->sk_uid = SOCK_INODE(sock)->i_uid;
	} else {
		RCU_INIT_POINTER(sk->sk_wq, NULL);
		sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
	}

	rwlock_init(&sk->sk_callback_lock);
	if (sk->sk_kern_sock)
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_kern_callback_keys + sk->sk_family,
			af_family_kern_clock_key_strings[sk->sk_family]);
	else
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = SK_DEFAULT_STAMP;
#if BITS_PER_LONG == 32
	seqlock_init(&sk->sk_stamp_seq);
#endif
	atomic_set(&sk->sk_zckey, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0UL;
	sk->sk_pacing_rate = ~0UL;
	sk->sk_pacing_shift = 10;
	sk->sk_incoming_cpu = -1;

	sk_rx_queue_clear(sk);
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	refcount_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning: release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *
 *   sk_lock.slock locked, owned = 0, BH disabled
 *
 * Returns true if the slow path was taken:
 *
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: BH remains disabled on this fast path;
		 * unlock_sock_fast() re-enables it.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
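/*
 * Illustrative usage sketch (not part of this file): the intended
 * pairing with unlock_sock_fast() for a short, non-blocking update;
 * sk_some_field stands in for any real field.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	sk->sk_some_field = val;
 *	unlock_sock_fast(sk, slow);
 *
 * unlock_sock_fast() releases the spinlock and re-enables BH on the
 * fast path, or performs a full release_sock() on the slow path.
 */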
int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32)
{
	struct sock *sk = sock->sk;
	struct timespec64 ts;

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec64(sock_read_timestamp(sk));
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		ktime_t kt = ktime_get_real();

		sock_write_timestamp(sk, kt);
		ts = ktime_to_timespec64(kt);
	}

	if (timeval)
		ts.tv_nsec /= 1000;

#ifdef CONFIG_COMPAT_32BIT_TIME
	if (time32)
		return put_old_timespec32(&ts, userstamp);
#endif
#ifdef CONFIG_SPARC64
	/* beware of padding in sparc64 timeval */
	if (timeval && !in_compat_syscall()) {
		struct __kernel_old_timeval tv = {
			.tv_sec = ts.tv_sec,
			.tv_usec = ts.tv_nsec,
		};

		if (copy_to_user(userstamp, &tv, sizeof(tv)))
			return -EFAULT;
		return 0;
	}
#endif
	return put_timespec64(&ts, userstamp);
}
EXPORT_SYMBOL(sock_gettstamp);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one.
		 */
		if (sock_needs_netstamp(sk) &&
		    !(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int copied, err;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);
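/*
 * Illustrative usage sketch (not part of this file): the user space
 * counterpart of sock_recv_errqueue() above. An application typically
 * polls for EPOLLERR and then drains the error queue:
 *
 *	char control[128];
 *	struct msghdr msg = {
 *		.msg_control = control,
 *		.msg_controllen = sizeof(control),
 *	};
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;		(errno == EAGAIN means the queue is empty)
 *
 * Each control message carries the level/type the protocol passed in,
 * e.g. SOL_IP/IP_RECVERR, wrapping a struct sock_extended_err.
 */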
/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}
EXPORT_SYMBOL_GPL(sock_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;

	net->core.sock_inuse = alloc_percpu(int);
	if (net->core.sock_inuse == NULL)
		goto out;

	return 0;

out:
	free_percpu(net->core.prot_inuse);
	return -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
	free_percpu(net->core.sock_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}

static void sock_inuse_add(struct net *net, int val)
{
}
#endif

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}
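/*
 * Illustrative usage sketch (not part of this file): a module pairing
 * proto_register() (below) with proto_unregister(); my_proto and
 * struct my_sock are made-up names.
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 *
 * With alloc_slab == 1, sockets of this protocol are allocated from a
 * dedicated kmem cache, optionally with a usercopy whitelist taken
 * from .useroffset/.usersize.
 */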
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name =
				kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_ACCOUNT |
						  prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	req_prot_cleanup(prot->rsk_prot);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ?
	       proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */