/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap
 * when the socket was created and whether the current process has it
 * in the user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap
 * when the socket was created and whether the current process has it
 * in all user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap
 * when the socket was created and whether the current process has it
 * over the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;
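
/*
 * Worked example (illustrative, not part of the original file):
 * SKB_TRUESIZE(256) is 256 bytes of payload plus the platform's
 * aligned struct sk_buff and struct skb_shared_info overhead.
 * Supposing that comes to roughly 768 bytes on some build, then
 *
 *	SK_WMEM_MAX = _SK_MEM_OVERHEAD * _SK_MEM_PACKETS
 *		    ~ 768 * 256 = 196608 bytes (192 KB)
 *
 * i.e. the defaults are sized so a socket can always hold about
 * 256 small packets, whatever sizeof(struct sk_buff) happens to be
 * on the architecture.
 */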

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
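
/*
 * Illustrative sketch (not part of the original file): a datagram
 * protocol's delivery path typically hands a received skb to the
 * socket with sock_queue_rcv_skb() and drops it on failure, roughly:
 *
 *	static int foo_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 *
 * "foo_rcv" is a hypothetical name; the pattern mirrors how UDP-style
 * protocols react to the return value above.
 */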

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
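
/*
 * Illustrative usage from user space (not in the original source):
 * binding a socket to "eth0" passes the interface name, and passing
 * an empty name (optlen 0) unbinds it again:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);
 *
 * The caller needs CAP_NET_RAW in the socket's network namespace, as
 * checked in sock_setbindtodevice() above.
 */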

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;
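
	/*
	 * Illustrative user-space view (not in the original source):
	 * because of the doubling above, a getsockopt() readback reports
	 * twice the requested value (subject to sysctl_rmem_max and
	 * SOCK_MIN_RCVBUF):
	 *
	 *	int val = 65536, out; socklen_t len = sizeof(out);
	 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	 *	// out is now 131072, the doubled value actually used
	 */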

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;
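
	/*
	 * Illustrative sketch (not in the original source): a user of
	 * SO_TIMESTAMPING typically ORs generation and reporting flags,
	 * e.g. software RX timestamps delivered as cmsgs:
	 *
	 *	int val = SOF_TIMESTAMPING_RX_SOFTWARE |
	 *		  SOF_TIMESTAMPING_SOFTWARE;
	 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	 *
	 * Any bit outside SOF_TIMESTAMPING_MASK is rejected with -EINVAL
	 * by the validation above, and SOF_TIMESTAMPING_OPT_ID on a TCP
	 * socket is only accepted once the connection is established.
	 */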

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	/* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
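
/*
 * Worked example (not in the original source): sock_set_timeout()
 * converts the user's struct timeval to jiffies, rounding the
 * microseconds up.  Assuming HZ=100 (10 ms per tick), a timeout of
 * { .tv_sec = 2, .tv_usec = 500000 } becomes
 *
 *	2 * 100 + (500000 + 9999) / 10000 = 200 + 50 = 250 jiffies
 *
 * and the SO_RCVTIMEO/SO_SNDTIMEO getsockopt() path below performs
 * the inverse conversion back to seconds and microseconds.
 */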

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
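
/*
 * Illustrative sketch (not part of the original file): a protocol's
 * create() handler typically pairs sk_alloc() with sock_init_data()
 * (defined near the end of this file), along the lines of:
 *
 *	struct sock *sk;
 *
 *	sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * "PF_FOO" and "foo_proto" are hypothetical; real families (e.g.
 * AF_PACKET) follow this same allocate-then-initialize sequence.
 */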

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * Last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_,
 * and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
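
/*
 * Context note with a small sketch (not in the original source):
 * sock_wfree() runs because a sender charged the skb to the socket
 * with skb_set_owner_w(), which sets the destructor and accounts the
 * skb's truesize against sk_wmem_alloc, conceptually:
 *
 *	skb->sk = sk;
 *	skb->destructor = sock_wfree;
 *	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 *
 * When kfree_skb() later invokes sock_wfree(), the bytes are returned
 * and writers blocked in sock_wait_for_wmem() can make progress.
 */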

void skb_orphan_partial(struct sk_buff *skb)
{
	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);
#endif

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);
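
/*
 * Illustrative usage (not part of the original file): callers must
 * pass the same size to the free routine that they passed to
 * sock_kmalloc(), since sk_omem_alloc is adjusted by that amount:
 *
 *	struct foo_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 *
 * "struct foo_opt" is hypothetical; IP option handling uses this
 * helper pair in just this way.
 */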

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
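
/*
 * Illustrative sketch (not in the original source): a datagram
 * sendmsg() implementation commonly blocks for send buffer space via
 * the helper above, then reserves headroom and copies the payload:
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 *	err = memcpy_from_msg(skb_put(skb, len), msg, len);
 *
 * "hlen" is a hypothetical header length. The returned skb is already
 * charged to the socket via skb_set_owner_w(), so freeing it releases
 * the space again.
 */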

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		pfrag->page = alloc_pages(gfp | __GFP_COMP |
					  __GFP_NOWARN | __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we can not loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}
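
/*
 * Context sketch (not part of the original file): __lock_sock() and
 * __release_sock() back the usual ownership pattern, where process
 * context takes the socket lock and softirq context queues to the
 * backlog in the meantime, drained on release:
 *
 *	lock_sock(sk);		// may sleep; marks the sock owned
 *	...modify socket state...
 *	release_sock(sk);	// drains the backlog via __release_sock()
 *
 * This is why sk_receive_skb() above either processes an skb directly
 * (trylock semantics) or defers it with sk_add_backlog().
 */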

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
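
/*
 * Illustrative sketch (not in the original source): protocol
 * recvmsg() loops use sk_wait_data() under the socket lock until the
 * receive queue fills or the timeout/signal breaks the wait:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 *
 * The error/shutdown rechecks are omitted here for brevity; real
 * callers (e.g. TCP's recvmsg) perform them before sleeping.
 */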
/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk:   socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 *
 * Returns 1 if the memory charge is accepted, 0 if it was refused and
 * rolled back.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
	    allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
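/*
 * Example (sketch): protocols normally reach __sk_mem_schedule() via the
 * sk_rmem_schedule()/sk_wmem_schedule() wrappers, e.g. when deciding
 * whether an incoming skb may be charged to a socket's receive queue:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		return -ENOBUFS;	(accounting refused, drop the skb)
 *
 *	skb_set_owner_r(skb, sk);	(charges sk_rmem_alloc)
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 */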
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
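/*
 * Example (sketch; PF_FOO and foo_* are hypothetical): address families
 * plug these stubs into their struct proto_ops for operations they do
 * not support, e.g.:
 *
 *	static const struct proto_ops foo_dgram_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.release	= foo_release,
 *		.bind		= foo_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */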
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_frag.page	=	NULL;
	sk->sk_frag.offset	=	0;
	sk->sk_peek_off		=	-1;

	sk->sk_peer_pid		=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id		=	0;
	sk->sk_ll_usec		=	sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0U;
	sk->sk_pacing_rate = ~0U;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *         sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *         sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/* Fast path: return with sk_lock.slock held and BH still
		 * disabled; unlock_sock_fast() undoes both.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
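/*
 * Example usage (sketch): callers pair lock_sock_fast() with
 * unlock_sock_fast(), passing the returned slow-path indication back so
 * the matching unlock variant is used:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	(short, non-blocking work on the socket)
 *
 *	unlock_sock_fast(sk, slow);
 */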
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int copied, err;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);
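/*
 * Example (sketch; SOL_FOO/FOO_RECVERR stand in for a family-specific
 * cmsg level and type): a protocol's recvmsg() can service MSG_ERRQUEUE
 * requests with this helper before touching the normal receive queue:
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_FOO, FOO_RECVERR);
 */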
/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
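/*
 * Example (sketch; foo_close is hypothetical): connection-less protocols
 * typically call sk_common_release() from their struct proto .close
 * handler once protocol-private state has been torn down, as RAW sockets
 * do:
 *
 *	static void foo_close(struct sock *sk, long timeout)
 *	{
 *		(protocol-specific teardown)
 *		sk_common_release(sk);
 *	}
 */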
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
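/*
 * Example (sketch; foo_prot and struct foo_sock are hypothetical): a
 * protocol registers itself once at module init, asking for a dedicated
 * slab cache, and unregisters on exit:
 *
 *	static struct proto foo_prot = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_prot, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_prot);
 *	}
 */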
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}


static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */