/*
 * NET		An implementation of the SOCKET network access protocol.
 *
 * Version:	@(#)socket.c	1.1.93	18/02/95
 *
 * Authors:	Orest Zborowski, <obz@Kodak.COM>
 *		Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Anonymous	:	NOTSOCK/BADF cleanup. Error fix in
 *					shutdown()
 *		Alan Cox	:	verify_area() fixes
 *		Alan Cox	:	Removed DDI
 *		Jonathan Kamens	:	SOCK_DGRAM reconnect bug
 *		Alan Cox	:	Moved a load of checks to the very
 *					top level.
 *		Alan Cox	:	Move address structures to/from user
 *					mode above the protocol layers.
 *		Rob Janssen	:	Allow 0 length sends.
 *		Alan Cox	:	Asynchronous I/O support (cribbed from the
 *					tty drivers).
 *		Niibe Yutaka	:	Asynchronous I/O for writes (4.4BSD style)
 *		Jeff Uphoff	:	Made max number of sockets command-line
 *					configurable.
 *		Matti Aarnio	:	Made the number of sockets dynamic,
 *					to be allocated when needed, and mr.
 *					Uphoff's max is used as max to be
 *					allowed to allocate.
 *		Linus		:	Argh. removed all the socket allocation
 *					altogether: it's in the inode now.
 *		Alan Cox	:	Made sock_alloc()/sock_release() public
 *					for NetROM and future kernel nfsd type
 *					stuff.
 *		Alan Cox	:	sendmsg/recvmsg basics.
 *		Tom Dyas	:	Export net symbols.
 *		Marcin Dalecki	:	Fixed problems with CONFIG_NET="n".
 *		Alan Cox	:	Added thread locking to sys_* calls
 *					for sockets. May have errors at the
 *					moment.
 *		Kevin Buhr	:	Fixed the dumb errors in the above.
 *		Andi Kleen	:	Some small cleanups, optimizations,
 *					and fixed a copy_from_user() bug.
 *		Tigran Aivazian	:	sys_send(args) calls sys_sendto(args, NULL, 0)
 *		Tigran Aivazian	:	Made listen(2) backlog sanity checks
 *					protocol-independent
 *
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *
 *	This module is effectively the top level interface to the BSD socket
 *	paradigm.
 *
 *	Based upon Swansea University Computer Society NET3.039
 */

#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/if_bridge.h>
#include <linux/if_frad.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kmod.h>
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

#include <net/compat.h>
#include <net/wext.h>
#include <net/cls_cgroup.h>

#include <net/sock.h>
#include <linux/netfilter.h>

#include <linux/if_tun.h>
#include <linux/ipv6_route.h>
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
#include <net/busy_poll.h>

#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif

static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos);
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);

static int sock_close(struct inode *inode, struct file *file);
static unsigned int sock_poll(struct file *file,
			      struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg);
#endif
static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags);

/*
 *	Socket files have a set of 'special' operations as well as the generic
 *	file ones. These don't appear in the operation structures but are done
 *	directly via the socketcall() multiplexor.
 */

static const struct file_operations socket_file_ops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.aio_read =	sock_aio_read,
	.aio_write =	sock_aio_write,
	.poll =		sock_poll,
	.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_sock_ioctl,
#endif
	.mmap =		sock_mmap,
	.open =		sock_no_open,	/* special open code to disallow open via /proc */
	.release =	sock_close,
	.fasync =	sock_fasync,
	.sendpage =	sock_sendpage,
	.splice_write = generic_splice_sendpage,
	.splice_read =	sock_splice_read,
};

/*
 *	The protocol list. Each protocol is registered in here.
 */

static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;

/*
 *	Statistics counters of the socket lists
 */

static DEFINE_PER_CPU(int, sockets_in_use);

/*
 * Support routines.
 * Move socket addresses back and forth across the kernel/user
 * divide and look after the messy bits.
 */

/**
 *	move_addr_to_kernel	-	copy a socket address into kernel space
 *	@uaddr: Address in user space
 *	@kaddr: Address in kernel space
 *	@ulen: Length in user space
 *
 *	The address is copied into kernel space. If the provided address is
 *	too long an error code of -EINVAL is returned. If the copy gives
 *	invalid addresses -EFAULT is returned. On a success 0 is returned.
 */

int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	return audit_sockaddr(ulen, kaddr);
}
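/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * the address-carrying syscalls later in this file (bind, connect, sendto)
 * consume move_addr_to_kernel() - copy the user sockaddr into a
 * sockaddr_storage on the kernel stack, then hand only the kernel copy to
 * the protocol. The helper name is hypothetical; the ->bind call stands in
 * for the pattern used by sys_bind() below.
 */
static int __maybe_unused example_copy_and_bind(struct socket *sock,
						struct sockaddr __user *uaddr,
						int addrlen)
{
	struct sockaddr_storage address;
	int err;

	/* Validates 0 <= addrlen <= sizeof(address) and copies it in. */
	err = move_addr_to_kernel(uaddr, addrlen, &address);
	if (err < 0)
		return err;

	/* The protocol layer only ever sees the kernel-space copy. */
	return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen);
}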
/**
 *	move_addr_to_user	-	copy an address to user space
 *	@kaddr: kernel space address
 *	@klen: length of address in kernel
 *	@uaddr: user space address
 *	@ulen: pointer to user length field
 *
 *	The value pointed to by ulen on entry is the buffer length available.
 *	This is overwritten with the buffer space used. -EINVAL is returned
 *	if an overlong buffer or a negative buffer size is specified. -EFAULT
 *	is returned if either the buffer or the length field are not
 *	accessible.
 *	After copying the data up to the limit the user specifies, the true
 *	length of the data is written over the length limit the user
 *	specified. Zero is returned for a success.
 */

static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
			     void __user *uaddr, int __user *ulen)
{
	int err;
	int len;

	BUG_ON(klen > sizeof(struct sockaddr_storage));
	err = get_user(len, ulen);
	if (err)
		return err;
	if (len > klen)
		len = klen;
	if (len < 0)
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	return __put_user(klen, ulen);
}

static struct kmem_cache *sock_inode_cachep __read_mostly;

static struct inode *sock_alloc_inode(struct super_block *sb)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq) {
		kmem_cache_free(sock_inode_cachep, ei);
		return NULL;
	}
	init_waitqueue_head(&wq->wait);
	wq->fasync_list = NULL;
	RCU_INIT_POINTER(ei->socket.wq, wq);

	ei->socket.state = SS_UNCONNECTED;
	ei->socket.flags = 0;
	ei->socket.ops = NULL;
	ei->socket.sk = NULL;
	ei->socket.file = NULL;

	return &ei->vfs_inode;
}

static void sock_destroy_inode(struct inode *inode)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = container_of(inode, struct socket_alloc, vfs_inode);
	wq = rcu_dereference_protected(ei->socket.wq, 1);
	kfree_rcu(wq, rcu);
	kmem_cache_free(sock_inode_cachep, ei);
}

static void init_once(void *foo)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
					      sizeof(struct socket_alloc),
					      0,
					      (SLAB_HWCACHE_ALIGN |
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_MEM_SPREAD),
					      init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static const struct super_operations sockfs_ops = {
	.alloc_inode	= sock_alloc_inode,
	.destroy_inode	= sock_destroy_inode,
	.statfs		= simple_statfs,
};

/*
 * sockfs_dname() is called from d_path().
 */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations sockfs_dentry_operations = {
	.d_dname  = sockfs_dname,
};

static struct dentry *sockfs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
		&sockfs_dentry_operations, SOCKFS_MAGIC);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.mount =	sockfs_mount,
	.kill_sb =	kill_anon_super,
};

/*
 *	Obtains the first available file descriptor and sets it up for use.
 *
 *	These functions create file structures and map them to the fd space
 *	of the current process. On success they return the file descriptor
 *	and the file struct implicitly stored in sock->file.
 *	Note that another thread may close the file descriptor before we
 *	return from this function. We use the fact that we do not refer to
 *	the socket after mapping. If one day we need to, this function will
 *	increment the file's reference count by one.
 *
 *	In any case the returned fd may already be invalid!
 *	This race condition is unavoidable with shared fd spaces; we cannot
 *	solve it inside the kernel, but we do take care of internal coherence.
 */

struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;

	if (dname) {
		name.name = dname;
		name.len = strlen(name.name);
	} else if (sock->sk) {
		name.name = sock->sk->sk_prot_creator->name;
		name.len = strlen(name.name);
	}
	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry))
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));
	SOCK_INODE(sock)->i_fop = &socket_file_ops;

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (unlikely(IS_ERR(file))) {
		/* drop dentry, keep inode */
		ihold(path.dentry->d_inode);
		path_put(&path);
		return file;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->private_data = sock;
	return file;
}
EXPORT_SYMBOL(sock_alloc_file);

static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {
		fd_install(fd, newfile);
		return fd;
	}

	put_unused_fd(fd);
	return PTR_ERR(newfile);
}

struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}
EXPORT_SYMBOL(sock_from_file);
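/*
 * Illustrative sketch (not part of the original file): the typical way a
 * caller that already holds a struct file checks whether it is a socket.
 * The helper name is hypothetical; sockfd_lookup() below builds exactly
 * this check on top of fget().
 */
static struct socket * __maybe_unused example_file_to_socket(struct file *file)
{
	int err;
	struct socket *sock;

	/* Returns the socket, or NULL with err set to -ENOTSOCK. */
	sock = sock_from_file(file, &err);
	if (!sock)
		return ERR_PTR(err);
	return sock;
}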
/**
 *	sockfd_lookup - Go from a file number to its socket slot
 *	@fd: file handle
 *	@err: pointer to an error code return
 *
 *	The file handle passed in is locked and the socket it is bound
 *	to is returned. If an error occurs the err pointer is overwritten
 *	with a negative errno code and NULL is returned. The function checks
 *	for both invalid handles and passing a handle which is not a socket.
 *
 *	On a success the socket object pointer is returned.
 */

struct socket *sockfd_lookup(int fd, int *err)
{
	struct file *file;
	struct socket *sock;

	file = fget(fd);
	if (!file) {
		*err = -EBADF;
		return NULL;
	}

	sock = sock_from_file(file, err);
	if (!sock)
		fput(file);
	return sock;
}
EXPORT_SYMBOL(sockfd_lookup);

static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
	struct file *file;
	struct socket *sock;

	*err = -EBADF;
	file = fget_light(fd, fput_needed);
	if (file) {
		sock = sock_from_file(file, err);
		if (sock)
			return sock;
		fput_light(file, *fput_needed);
	}
	return NULL;
}

#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
static ssize_t sockfs_getxattr(struct dentry *dentry,
			       const char *name, void *value, size_t size)
{
	const char *proto_name;
	size_t proto_size;
	int error;

	error = -ENODATA;
	if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
		proto_name = dentry->d_name.name;
		proto_size = strlen(proto_name);

		if (value) {
			error = -ERANGE;
			if (proto_size + 1 > size)
				goto out;

			strncpy(value, proto_name, proto_size + 1);
		}
		error = proto_size + 1;
	}

out:
	return error;
}

static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
				size_t size)
{
	ssize_t len;
	ssize_t used = 0;

	len = security_inode_listsecurity(dentry->d_inode, buffer, size);
	if (len < 0)
		return len;
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		buffer += len;
	}

	len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
		buffer += len;
	}

	return used;
}

static const struct inode_operations sockfs_inode_ops = {
	.getxattr = sockfs_getxattr,
	.listxattr = sockfs_listxattr,
};

/**
 *	sock_alloc	-	allocate a socket
 *
 *	Allocate a new inode and socket object. The two are bound together
 *	and initialised. The socket is then returned. If we are out of inodes
 *	NULL is returned.
 */

static struct socket *sock_alloc(void)
{
	struct inode *inode;
	struct socket *sock;

	inode = new_inode_pseudo(sock_mnt->mnt_sb);
	if (!inode)
		return NULL;

	sock = SOCKET_I(inode);

	kmemcheck_annotate_bitfield(sock, type);
	inode->i_ino = get_next_ino();
	inode->i_mode = S_IFSOCK | S_IRWXUGO;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_op = &sockfs_inode_ops;

	this_cpu_add(sockets_in_use, 1);
	return sock;
}
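/*
 * Illustrative sketch (not part of the original file): how kernel code that
 * is handed a user file descriptor typically resolves it to a socket with
 * sockfd_lookup() and then drops the reference again. The helper name is
 * hypothetical; the syscalls below use the sockfd_lookup_light() variant.
 */
static int __maybe_unused example_peek_socket_state(int fd)
{
	int err;
	struct socket *sock;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;		/* -EBADF or -ENOTSOCK */

	err = sock->state;		/* e.g. SS_CONNECTED */
	fput(sock->file);		/* release the reference fget() took */
	return err;
}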
/*
 *	In theory you can't get an open on this inode, but /proc provides
 *	a back door. Remember to keep it shut otherwise you'll let the
 *	creepy crawlies in.
 */

static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
{
	return -ENXIO;
}

const struct file_operations bad_sock_fops = {
	.owner = THIS_MODULE,
	.open = sock_no_open,
	.llseek = noop_llseek,
};

/**
 *	sock_release	-	close a socket
 *	@sock: socket to close
 *
 *	The socket is released from the protocol stack if it has a release
 *	callback, and the inode is then released if the socket is bound to
 *	an inode not a file.
 */

void sock_release(struct socket *sock)
{
	if (sock->ops) {
		struct module *owner = sock->ops->owner;

		sock->ops->release(sock);
		sock->ops = NULL;
		module_put(owner);
	}

	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
		printk(KERN_ERR "sock_release: fasync list not empty!\n");

	if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
		return;

	this_cpu_sub(sockets_in_use, 1);
	if (!sock->file) {
		iput(SOCK_INODE(sock));
		return;
	}
	sock->file = NULL;
}
EXPORT_SYMBOL(sock_release);

void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
{
	*tx_flags = 0;
	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
		*tx_flags |= SKBTX_HW_TSTAMP;
	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
		*tx_flags |= SKBTX_SW_TSTAMP;
	if (sock_flag(sk, SOCK_WIFI_STATUS))
		*tx_flags |= SKBTX_WIFI_STATUS;
}
EXPORT_SYMBOL(sock_tx_timestamp);

static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
				       struct msghdr *msg, size_t size)
{
	struct sock_iocb *si = kiocb_to_siocb(iocb);

	si->sock = sock;
	si->scm = NULL;
	si->msg = msg;
	si->size = size;

	return sock->ops->sendmsg(iocb, sock, msg, size);
}

static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size)
{
	int err = security_socket_sendmsg(sock, msg, size);

	return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
}

int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_sendmsg(&iocb, sock, msg, size);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}
EXPORT_SYMBOL(sock_sendmsg);

static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}

int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size)
{
	mm_segment_t oldfs = get_fs();
	int result;

	set_fs(KERNEL_DS);
	/*
	 * the following is safe, since for the compiler the definitions of
	 * kvec and iovec are identical, yielding the same in-core layout
	 * and alignment
	 */
	msg->msg_iov = (struct iovec *)vec;
	msg->msg_iovlen = num;
	result = sock_sendmsg(sock, msg, size);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_sendmsg);
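/*
 * Illustrative sketch (not part of the original file): kernel_sendmsg() is
 * the in-kernel counterpart of sendmsg(2) and takes kvecs instead of user
 * iovecs. The helper below is hypothetical and only shows the calling
 * convention for a caller that already holds a struct socket.
 */
static int __maybe_unused example_kernel_send(struct socket *sock,
					      void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = buf, .iov_len = len };

	/* One kvec, len bytes total; returns bytes sent or a -errno. */
	return kernel_sendmsg(sock, &msg, &vec, 1, len);
}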
/*
 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
 */
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
			   struct sk_buff *skb)
{
	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
	struct timespec ts[3];
	int empty = 1;
	struct skb_shared_hwtstamps *shhwtstamps =
		skb_hwtstamps(skb);

	/* Race occurred between timestamp enabling and packet
	   receiving.  Fill in the current time for now. */
	if (need_software_tstamp && skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	if (need_software_tstamp) {
		if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
			struct timeval tv;
			skb_get_timestamp(skb, &tv);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
				 sizeof(tv), &tv);
		} else {
			skb_get_timestampns(skb, &ts[0]);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
				 sizeof(ts[0]), &ts[0]);
		}
	}


	memset(ts, 0, sizeof(ts));
	if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec_cond(skb->tstamp, ts + 0))
		empty = 0;
	if (shhwtstamps) {
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
		    ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1))
			empty = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
		    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
			empty = 0;
	}
	if (!empty)
		put_cmsg(msg, SOL_SOCKET,
			 SCM_TIMESTAMPING, sizeof(ts), &ts);
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);

void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
			     struct sk_buff *skb)
{
	int ack;

	if (!sock_flag(sk, SOCK_WIFI_STATUS))
		return;
	if (!skb->wifi_acked_valid)
		return;

	ack = skb->wifi_acked;

	put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
}
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);

static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			sizeof(__u32), &skb->dropcount);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
			      struct sk_buff *skb)
{
	sock_recv_timestamp(msg, sk, skb);
	sock_recv_drops(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);

static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
				       struct msghdr *msg, size_t size, int flags)
{
	struct sock_iocb *si = kiocb_to_siocb(iocb);

	si->sock = sock;
	si->scm = NULL;
	si->msg = msg;
	si->size = size;
	si->flags = flags;

	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
}

static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				 struct msghdr *msg, size_t size, int flags)
{
	int err = security_socket_recvmsg(sock, msg, size, flags);

	return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
}

int sock_recvmsg(struct socket *sock, struct msghdr *msg,
		 size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}
EXPORT_SYMBOL(sock_recvmsg);

static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct kiocb iocb;
	struct sock_iocb siocb;
	int ret;

	init_sync_kiocb(&iocb, NULL);
	iocb.private = &siocb;
	ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&iocb);
	return ret;
}

/**
 * kernel_recvmsg - Receive a message from a socket (kernel space)
 * @sock:	The socket to receive the message from
 * @msg:	Received message
 * @vec:	Input s/g array for message data
 * @num:	Size of input s/g array
 * @size:	Number of bytes to read
 * @flags:	Message flags (MSG_DONTWAIT, etc...)
 *
 * On return the msg structure contains the scatter/gather array passed in the
 * vec argument. The array is modified so that it consists of the unfilled
 * portion of the original array.
 *
 * The returned value is the total number of bytes received, or an error.
 */
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size, int flags)
{
	mm_segment_t oldfs = get_fs();
	int result;

	set_fs(KERNEL_DS);
	/*
	 * the following is safe, since for the compiler the definitions of
	 * kvec and iovec are identical, yielding the same in-core layout
	 * and alignment
	 */
	msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
	result = sock_recvmsg(sock, msg, size, flags);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_recvmsg);

static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more)
{
	struct socket *sock;
	int flags;

	sock = file->private_data;

	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
	flags |= more;

	return kernel_sendpage(sock, page, offset, size, flags);
}

static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct socket *sock = file->private_data;

	if (unlikely(!sock->ops->splice_read))
		return -EINVAL;

	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}

static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
					 struct sock_iocb *siocb)
{
	if (!is_sync_kiocb(iocb))
		BUG();

	siocb->kiocb = iocb;
	iocb->private = siocb;
	return siocb;
}
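/*
 * Illustrative sketch (not part of the original file): kernel_recvmsg() is
 * the in-kernel receive path matching kernel_sendmsg() above. The helper is
 * hypothetical and only demonstrates the kvec-based calling convention.
 */
static int __maybe_unused example_kernel_recv(struct socket *sock,
					      void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = 0 };
	struct kvec vec = { .iov_base = buf, .iov_len = len };

	/* Non-blocking read of up to len bytes; bytes received or -errno. */
	return kernel_recvmsg(sock, &msg, &vec, 1, len, MSG_DONTWAIT);
}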
static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
			    struct file *file, const struct iovec *iov,
			    unsigned long nr_segs)
{
	struct socket *sock = file->private_data;
	size_t size = 0;
	int i;

	for (i = 0; i < nr_segs; i++)
		size += iov[i].iov_len;

	msg->msg_name = NULL;
	msg->msg_namelen = 0;
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_iov = (struct iovec *)iov;
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;

	return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
}

static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	if (pos != 0)
		return -ESPIPE;

	if (iocb->ki_nbytes == 0)	/* Match SYS5 behaviour */
		return 0;


	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;
	return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
			     struct file *file, const struct iovec *iov,
			     unsigned long nr_segs)
{
	struct socket *sock = file->private_data;
	size_t size = 0;
	int i;

	for (i = 0; i < nr_segs; i++)
		size += iov[i].iov_len;

	msg->msg_name = NULL;
	msg->msg_namelen = 0;
	msg->msg_control = NULL;
	msg->msg_controllen = 0;
	msg->msg_iov = (struct iovec *)iov;
	msg->msg_iovlen = nr_segs;
	msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	if (sock->type == SOCK_SEQPACKET)
		msg->msg_flags |= MSG_EOR;

	return __sock_sendmsg(iocb, sock, msg, size);
}

static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct sock_iocb siocb, *x;

	if (pos != 0)
		return -ESPIPE;

	x = alloc_sock_iocb(iocb, &siocb);
	if (!x)
		return -ENOMEM;

	return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}

/*
 * Atomic setting of ioctl hooks to avoid race
 * with module unload.
 */

static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);

void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

static long sock_do_ioctl(struct net *net, struct socket *sock,
			  unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}
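/*
 * Illustrative sketch (not part of the original file): how an optional module
 * (bridge, 8021q, dlci) is expected to plug into the hook setters above.
 * The handler and helper names are hypothetical; the real modules register
 * their handler on load and pass NULL on unload so sock_ioctl() never calls
 * into unloaded code.
 */
static int example_vlan_handler(struct net *net, void __user *arg)
{
	return -EINVAL;		/* placeholder: a real handler parses *arg */
}

static void __maybe_unused example_register_vlan_hook(bool load)
{
	/* Registration and removal are serialized by vlan_ioctl_mutex. */
	vlan_ioctl_set(load ? example_vlan_handler : NULL);
}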
/*
 *	With an ioctl, arg may well be a user mode pointer, but we don't know
 *	what to do with it - that's up to the protocol still.
 */

static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct socket *sock;
	struct sock *sk;
	void __user *argp = (void __user *)arg;
	int pid, err;
	struct net *net;

	sock = file->private_data;
	sk = sock->sk;
	net = sock_net(sk);
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
		err = dev_ioctl(net, cmd, argp);
	} else
#ifdef CONFIG_WEXT_CORE
	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
		err = dev_ioctl(net, cmd, argp);
	} else
#endif
		switch (cmd) {
		case FIOSETOWN:
		case SIOCSPGRP:
			err = -EFAULT;
			if (get_user(pid, (int __user *)argp))
				break;
			err = f_setown(sock->file, pid, 1);
			break;
		case FIOGETOWN:
		case SIOCGPGRP:
			err = put_user(f_getown(sock->file),
				       (int __user *)argp);
			break;
		case SIOCGIFBR:
		case SIOCSIFBR:
		case SIOCBRADDBR:
		case SIOCBRDELBR:
			err = -ENOPKG;
			if (!br_ioctl_hook)
				request_module("bridge");

			mutex_lock(&br_ioctl_mutex);
			if (br_ioctl_hook)
				err = br_ioctl_hook(net, cmd, argp);
			mutex_unlock(&br_ioctl_mutex);
			break;
		case SIOCGIFVLAN:
		case SIOCSIFVLAN:
			err = -ENOPKG;
			if (!vlan_ioctl_hook)
				request_module("8021q");

			mutex_lock(&vlan_ioctl_mutex);
			if (vlan_ioctl_hook)
				err = vlan_ioctl_hook(net, argp);
			mutex_unlock(&vlan_ioctl_mutex);
			break;
		case SIOCADDDLCI:
		case SIOCDELDLCI:
			err = -ENOPKG;
			if (!dlci_ioctl_hook)
				request_module("dlci");

			mutex_lock(&dlci_ioctl_mutex);
			if (dlci_ioctl_hook)
				err = dlci_ioctl_hook(cmd, argp);
			mutex_unlock(&dlci_ioctl_mutex);
			break;
		default:
			err = sock_do_ioctl(net, sock, cmd, arg);
			break;
		}
	return err;
}

int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
	int err;
	struct socket *sock = NULL;

	err = security_socket_create(family, type, protocol, 1);
	if (err)
		goto out;

	sock = sock_alloc();
	if (!sock) {
		err = -ENOMEM;
		goto out;
	}

	sock->type = type;
	err = security_socket_post_create(sock, family, type, protocol, 1);
	if (err)
		goto out_release;

out:
	*res = sock;
	return err;
out_release:
	sock_release(sock);
	sock = NULL;
	goto out;
}
EXPORT_SYMBOL(sock_create_lite);

/* No kernel lock held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
{
	unsigned int busy_flag = 0;
	struct socket *sock;

	/*
	 *	We can't return errors to poll, so it's either yes or no.
	 */
	sock = file->private_data;

	if (sk_can_busy_loop(sock->sk)) {
		/* this socket can poll_ll so tell the system call */
		busy_flag = POLL_BUSY_LOOP;

		/* once, only if requested by syscall */
		if (wait && (wait->_key & POLL_BUSY_LOOP))
			sk_busy_loop(sock->sk, 1);
	}

	return busy_flag | sock->ops->poll(file, sock, wait);
}

static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	return sock->ops->mmap(file, sock, vma);
}

static int sock_close(struct inode *inode, struct file *filp)
{
	sock_release(SOCKET_I(inode));
	return 0;
}

/*
 *	Update the socket async list
 *
 *	Fasync_list locking strategy.
 *
 *	1. fasync_list is modified only under process context socket lock
 *	   i.e. under semaphore.
 *	2. fasync_list is used under read_lock(&sk->sk_callback_lock)
 *	   or under socket lock
 */

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;
	struct socket_wq *wq;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);
	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
	fasync_helper(fd, filp, on, &wq->fasync_list);

	if (!wq->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}

/* This function may be called only under socket lock or callback_lock or rcu_lock */

int sock_wake_async(struct socket *sock, int how, int band)
{
	struct socket_wq *wq;

	if (!sock)
		return -1;
	rcu_read_lock();
	wq = rcu_dereference(sock->wq);
	if (!wq || !wq->fasync_list) {
		rcu_read_unlock();
		return -1;
	}
	switch (how) {
	case SOCK_WAKE_WAITD:
		if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
		if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
			break;
		/* fall through */
	case SOCK_WAKE_IO:
call_kill:
		kill_fasync(&wq->fasync_list, SIGIO, band);
		break;
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list, SIGURG, band);
	}
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(sock_wake_async);
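/*
 * Illustrative sketch (not part of the original file): protocol code normally
 * reaches sock_wake_async() through the sk_wake_async() wrapper when data
 * arrives or write space opens up. The helper below is hypothetical and only
 * shows the "data ready" case delivering SIGIO with POLL_IN to fasync users.
 */
static void __maybe_unused example_notify_data_ready(struct socket *sock)
{
	/* No-op unless a process registered itself via sock_fasync(). */
	sock_wake_async(sock, SOCK_WAKE_WAITD, POLL_IN);
}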
int __sock_create(struct net *net, int family, int type, int protocol,
		  struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *	Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		static int warned;
		if (!warned) {
			warned = 1;
			printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			       current->comm);
		}
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. If
	 *	the protocol is 0, the family is instructed to select an
	 *	appropriate default.
	 */
	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but it's the
				   closest POSIX thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this really only makes sense if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now bump the refcnt of the [loadable] module that owns this
	 * socket; at sock_release time we decrement it again.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);

int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(&init_net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);
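/*
 * Illustrative sketch (not part of the original file): how kernel users such
 * as network filesystems typically create an internal socket with
 * sock_create_kern() and tear it down with sock_release(). The helper name
 * is hypothetical; protocol 0 lets the family pick its default for the type.
 */
static int __maybe_unused example_make_kernel_stream_socket(struct socket **res)
{
	int err;

	err = sock_create_kern(PF_INET, SOCK_STREAM, 0, res);
	if (err < 0)
		return err;

	/* ... use (*res)->ops->connect(), kernel_sendmsg(), etc. ... */

	sock_release(*res);	/* drops the inode; no file is involved */
	*res = NULL;
	return 0;
}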
SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.  */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may already be another descriptor 8) Not a kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = get_unused_fd_flags(flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}
	fd2 = get_unused_fd_flags(flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		put_unused_fd(fd1);
		goto out_release_both;
	}

	newfile1 = sock_alloc_file(sock1, flags, NULL);
	if (unlikely(IS_ERR(newfile1))) {
		err = PTR_ERR(newfile1);
		put_unused_fd(fd1);
		put_unused_fd(fd2);
		goto out_release_both;
	}

	newfile2 = sock_alloc_file(sock2, flags, NULL);
	if (IS_ERR(newfile2)) {
		err = PTR_ERR(newfile2);
		fput(newfile1);
		put_unused_fd(fd1);
		put_unused_fd(fd2);
		sock_release(sock2);
		goto out;
	}

	audit_fd_pair(fd1, fd2);
	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);
	/* fd1 and fd2 may already refer to other descriptors.
	 * Not a kernel problem.
	 */

	err = put_user(fd1, &usockvec[0]);
	if (!err)
		err = put_user(fd2, &usockvec[1]);
	if (!err)
		return 0;

	sys_close(fd2);
	sys_close(fd1);
	return err;

out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);
out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
 *
 *	We move the socket address to kernel space before we call
 *	the protocol layer (having also checked the address is ok).
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, &address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen. Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned int)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way that's
 *	clean when we restructure accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}
	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
	if (unlikely(IS_ERR(newfile))) {
		err = PTR_ERR(newfile);
		put_unused_fd(newfd);
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user(&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike other OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.  The address
 *	is in user space so we verify it is OK and move it to kernel space.
 *
 *	For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 *	break bindings
 *
 *	NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
 *	other SEQPACKET protocols that take time to connect() as it doesn't
 *	include the -EINPROGRESS status for such sockets.
 */

SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
		int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = move_addr_to_kernel(uservaddr, addrlen, &address);
	if (err < 0)
		goto out_put;

	err =
	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
	if (err)
		goto out_put;

	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
				 sock->file->f_flags);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the local address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the remote address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		err =
		    sock->ops->getname(sock, (struct sockaddr *)&address, &len,
				       1);
		if (!err)
			err = move_addr_to_user(&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}
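/*
 * Illustrative sketch (not part of the original file): the ->getname()
 * convention used by the two syscalls above. The last argument selects the
 * local name (0) or the peer name (1); the result lands in a kernel
 * sockaddr_storage and the length is set by the protocol. The helper name
 * is hypothetical.
 */
static int __maybe_unused example_peer_name(struct socket *sock,
					    struct sockaddr_storage *addr)
{
	int len;

	/* 1 == peer; the socket must be connected for this to succeed. */
	return sock->ops->getname(sock, (struct sockaddr *)addr, &len, 1);
}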
/*
 *	Send a datagram to a given address. We move the address into kernel
 *	space and check the user space data area is readable before invoking
 *	the protocol.
 */

SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, &address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Send a datagram down a socket.
 */

SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}

/*
 *	Receive a frame from the socket and optionally record the address of
 *	the sender. We verify the buffers are writable and if needed move the
 *	sender address from kernel to user space.
 */

SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned int, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	/* Save some cycles and don't copy the address if not needed */
	msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
	/* We assume all kernel code knows the size of sockaddr_storage */
	msg.msg_namelen = 0;
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user(&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Receive a datagram from a socket.
 */

asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
			 unsigned int flags)
{
	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}

/*
 *	Set a socket option. Because we don't know the option lengths we have
 *	to pass the user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	if (optlen < 0)
		return -EINVAL;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_setsockopt(sock, level, optname);
		if (err)
			goto out_put;

		if (level == SOL_SOCKET)
			err =
			    sock_setsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->setsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Get a socket option. Because we don't know the option lengths we have
 *	to pass a user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int __user *, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getsockopt(sock, level, optname);
		if (err)
			goto out_put;

		if (level == SOL_SOCKET)
			err =
			    sock_getsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->getsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Shutdown a socket.
 */

SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_shutdown(sock, how);
		if (!err)
			err = sock->ops->shutdown(sock, how);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/* A couple of helpful macros for getting the address of the 32/64 bit
 * fields which are the same type (int / unsigned) on our platforms.
 */
#define COMPAT_MSG(msg, member)	((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)

struct used_address {
	struct sockaddr_storage name;
	unsigned int name_len;
};

static int copy_msghdr_from_user(struct msghdr *kmsg,
				 struct msghdr __user *umsg)
{
	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
		return -EFAULT;
	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
	return 0;
}

static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
			  struct msghdr *msg_sys, unsigned int flags,
			  struct used_address *used_address)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct sockaddr_storage address;
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20]
	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	int err, ctl_len, total_len;

	err = -EFAULT;
	if (MSG_CMSG_COMPAT & flags) {
		if (get_compat_msghdr(msg_sys, msg_compat))
			return -EFAULT;
	} else {
		err = copy_msghdr_from_user(msg_sys, msg);
		if (err)
			return err;
	}

	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
		err = -EMSGSIZE;
		if (msg_sys->msg_iovlen > UIO_MAXIOV)
			goto out;
		err = -ENOMEM;
		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
			      GFP_KERNEL);
		if (!iov)
			goto out;
	}

	/* This will also move the address data into kernel space */
	if (MSG_CMSG_COMPAT & flags) {
		err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
	} else
		err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
	if (err < 0)
		goto out_freeiov;
	total_len = err;

	err = -ENOBUFS;

	if (msg_sys->msg_controllen > INT_MAX)
		goto out_freeiov;
	ctl_len = msg_sys->msg_controllen;
	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
		err =
		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
						     sizeof(ctl));
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys->msg_control;
		ctl_len = msg_sys->msg_controllen;
	} else if (ctl_len) {
		if (ctl_len > sizeof(ctl)) {
			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
			if (ctl_buf == NULL)
				goto out_freeiov;
		}
		err = -EFAULT;
		/*
		 * Careful! Before this, msg_sys->msg_control contains a user
		 * pointer. Afterwards, it will be a kernel pointer. Thus the
		 * compiler-assisted checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
				   (void __user __force *)msg_sys->msg_control,
				   ctl_len))
			goto out_freectl;
		msg_sys->msg_control = ctl_buf;
	}
	msg_sys->msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys->msg_flags |= MSG_DONTWAIT;
	/*
	 * If this is sendmmsg() and current destination address is same as
	 * previously succeeded address, omit asking LSM's decision.
	 * used_address->name_len is initialized to UINT_MAX so that the first
	 * destination address never matches.
2064 */ 2065 if (used_address && msg_sys->msg_name && 2066 used_address->name_len == msg_sys->msg_namelen && 2067 !memcmp(&used_address->name, msg_sys->msg_name, 2068 used_address->name_len)) { 2069 err = sock_sendmsg_nosec(sock, msg_sys, total_len); 2070 goto out_freectl; 2071 } 2072 err = sock_sendmsg(sock, msg_sys, total_len); 2073 /* 2074 * If this is sendmmsg() and sending to current destination address was 2075 * successful, remember it. 2076 */ 2077 if (used_address && err >= 0) { 2078 used_address->name_len = msg_sys->msg_namelen; 2079 if (msg_sys->msg_name) 2080 memcpy(&used_address->name, msg_sys->msg_name, 2081 used_address->name_len); 2082 } 2083 2084 out_freectl: 2085 if (ctl_buf != ctl) 2086 sock_kfree_s(sock->sk, ctl_buf, ctl_len); 2087 out_freeiov: 2088 if (iov != iovstack) 2089 kfree(iov); 2090 out: 2091 return err; 2092 } 2093 2094 /* 2095 * BSD sendmsg interface 2096 */ 2097 2098 long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) 2099 { 2100 int fput_needed, err; 2101 struct msghdr msg_sys; 2102 struct socket *sock; 2103 2104 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2105 if (!sock) 2106 goto out; 2107 2108 err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); 2109 2110 fput_light(sock->file, fput_needed); 2111 out: 2112 return err; 2113 } 2114 2115 SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) 2116 { 2117 if (flags & MSG_CMSG_COMPAT) 2118 return -EINVAL; 2119 return __sys_sendmsg(fd, msg, flags); 2120 } 2121 2122 /* 2123 * Linux sendmmsg interface 2124 */ 2125 2126 int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 2127 unsigned int flags) 2128 { 2129 int fput_needed, err, datagrams; 2130 struct socket *sock; 2131 struct mmsghdr __user *entry; 2132 struct compat_mmsghdr __user *compat_entry; 2133 struct msghdr msg_sys; 2134 struct used_address used_address; 2135 2136 if (vlen > UIO_MAXIOV) 2137 vlen = UIO_MAXIOV; 2138 2139 datagrams = 0; 2140 2141 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2142 if (!sock) 2143 return err; 2144 2145 used_address.name_len = UINT_MAX; 2146 entry = mmsg; 2147 compat_entry = (struct compat_mmsghdr __user *)mmsg; 2148 err = 0; 2149 2150 while (datagrams < vlen) { 2151 if (MSG_CMSG_COMPAT & flags) { 2152 err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, 2153 &msg_sys, flags, &used_address); 2154 if (err < 0) 2155 break; 2156 err = __put_user(err, &compat_entry->msg_len); 2157 ++compat_entry; 2158 } else { 2159 err = ___sys_sendmsg(sock, 2160 (struct msghdr __user *)entry, 2161 &msg_sys, flags, &used_address); 2162 if (err < 0) 2163 break; 2164 err = put_user(err, &entry->msg_len); 2165 ++entry; 2166 } 2167 2168 if (err) 2169 break; 2170 ++datagrams; 2171 } 2172 2173 fput_light(sock->file, fput_needed); 2174 2175 /* We only return an error if no datagrams were able to be sent */ 2176 if (datagrams != 0) 2177 return datagrams; 2178 2179 return err; 2180 } 2181 2182 SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, 2183 unsigned int, vlen, unsigned int, flags) 2184 { 2185 if (flags & MSG_CMSG_COMPAT) 2186 return -EINVAL; 2187 return __sys_sendmmsg(fd, mmsg, vlen, flags); 2188 } 2189 2190 static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2191 struct msghdr *msg_sys, unsigned int flags, int nosec) 2192 { 2193 struct compat_msghdr __user *msg_compat = 2194 (struct compat_msghdr __user *)msg; 2195 struct iovec iovstack[UIO_FASTIOV]; 2196 struct iovec *iov = iovstack; 2197 unsigned long cmsg_ptr; 
2198 int err, total_len, len; 2199 2200 /* kernel mode address */ 2201 struct sockaddr_storage addr; 2202 2203 /* user mode address pointers */ 2204 struct sockaddr __user *uaddr; 2205 int __user *uaddr_len; 2206 2207 if (MSG_CMSG_COMPAT & flags) { 2208 if (get_compat_msghdr(msg_sys, msg_compat)) 2209 return -EFAULT; 2210 } else { 2211 err = copy_msghdr_from_user(msg_sys, msg); 2212 if (err) 2213 return err; 2214 } 2215 2216 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2217 err = -EMSGSIZE; 2218 if (msg_sys->msg_iovlen > UIO_MAXIOV) 2219 goto out; 2220 err = -ENOMEM; 2221 iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), 2222 GFP_KERNEL); 2223 if (!iov) 2224 goto out; 2225 } 2226 2227 /* Save the user-mode address (verify_iovec will change the 2228 * kernel msghdr to use the kernel address space) 2229 */ 2230 uaddr = (__force void __user *)msg_sys->msg_name; 2231 uaddr_len = COMPAT_NAMELEN(msg); 2232 if (MSG_CMSG_COMPAT & flags) 2233 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); 2234 else 2235 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); 2236 if (err < 0) 2237 goto out_freeiov; 2238 total_len = err; 2239 2240 cmsg_ptr = (unsigned long)msg_sys->msg_control; 2241 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); 2242 2243 /* We assume all kernel code knows the size of sockaddr_storage */ 2244 msg_sys->msg_namelen = 0; 2245 2246 if (sock->file->f_flags & O_NONBLOCK) 2247 flags |= MSG_DONTWAIT; 2248 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, 2249 total_len, flags); 2250 if (err < 0) 2251 goto out_freeiov; 2252 len = err; 2253 2254 if (uaddr != NULL) { 2255 err = move_addr_to_user(&addr, 2256 msg_sys->msg_namelen, uaddr, 2257 uaddr_len); 2258 if (err < 0) 2259 goto out_freeiov; 2260 } 2261 err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), 2262 COMPAT_FLAGS(msg)); 2263 if (err) 2264 goto out_freeiov; 2265 if (MSG_CMSG_COMPAT & flags) 2266 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, 2267 &msg_compat->msg_controllen); 2268 else 2269 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, 2270 &msg->msg_controllen); 2271 if (err) 2272 goto out_freeiov; 2273 err = len; 2274 2275 out_freeiov: 2276 if (iov != iovstack) 2277 kfree(iov); 2278 out: 2279 return err; 2280 } 2281 2282 /* 2283 * BSD recvmsg interface 2284 */ 2285 2286 long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) 2287 { 2288 int fput_needed, err; 2289 struct msghdr msg_sys; 2290 struct socket *sock; 2291 2292 sock = sockfd_lookup_light(fd, &err, &fput_needed); 2293 if (!sock) 2294 goto out; 2295 2296 err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); 2297 2298 fput_light(sock->file, fput_needed); 2299 out: 2300 return err; 2301 } 2302 2303 SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, 2304 unsigned int, flags) 2305 { 2306 if (flags & MSG_CMSG_COMPAT) 2307 return -EINVAL; 2308 return __sys_recvmsg(fd, msg, flags); 2309 } 2310 2311 /* 2312 * Linux recvmmsg interface 2313 */ 2314 2315 int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 2316 unsigned int flags, struct timespec *timeout) 2317 { 2318 int fput_needed, err, datagrams; 2319 struct socket *sock; 2320 struct mmsghdr __user *entry; 2321 struct compat_mmsghdr __user *compat_entry; 2322 struct msghdr msg_sys; 2323 struct timespec end_time; 2324 2325 if (timeout && 2326 poll_select_set_timeout(&end_time, timeout->tv_sec, 2327 timeout->tv_nsec)) 2328 return -EINVAL; 2329 2330 datagrams = 0; 2331 2332 sock = 
sockfd_lookup_light(fd, &err, &fput_needed); 2333 if (!sock) 2334 return err; 2335 2336 err = sock_error(sock->sk); 2337 if (err) 2338 goto out_put; 2339 2340 entry = mmsg; 2341 compat_entry = (struct compat_mmsghdr __user *)mmsg; 2342 2343 while (datagrams < vlen) { 2344 /* 2345 * No need to ask LSM for more than the first datagram. 2346 */ 2347 if (MSG_CMSG_COMPAT & flags) { 2348 err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, 2349 &msg_sys, flags & ~MSG_WAITFORONE, 2350 datagrams); 2351 if (err < 0) 2352 break; 2353 err = __put_user(err, &compat_entry->msg_len); 2354 ++compat_entry; 2355 } else { 2356 err = ___sys_recvmsg(sock, 2357 (struct msghdr __user *)entry, 2358 &msg_sys, flags & ~MSG_WAITFORONE, 2359 datagrams); 2360 if (err < 0) 2361 break; 2362 err = put_user(err, &entry->msg_len); 2363 ++entry; 2364 } 2365 2366 if (err) 2367 break; 2368 ++datagrams; 2369 2370 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 2371 if (flags & MSG_WAITFORONE) 2372 flags |= MSG_DONTWAIT; 2373 2374 if (timeout) { 2375 ktime_get_ts(timeout); 2376 *timeout = timespec_sub(end_time, *timeout); 2377 if (timeout->tv_sec < 0) { 2378 timeout->tv_sec = timeout->tv_nsec = 0; 2379 break; 2380 } 2381 2382 /* Timeout, return less than vlen datagrams */ 2383 if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) 2384 break; 2385 } 2386 2387 /* Out of band data, return right away */ 2388 if (msg_sys.msg_flags & MSG_OOB) 2389 break; 2390 } 2391 2392 out_put: 2393 fput_light(sock->file, fput_needed); 2394 2395 if (err == 0) 2396 return datagrams; 2397 2398 if (datagrams != 0) { 2399 /* 2400 * We may return less entries than requested (vlen) if the 2401 * sock is non block and there aren't enough datagrams... 2402 */ 2403 if (err != -EAGAIN) { 2404 /* 2405 * ... or if recvmsg returns an error after we 2406 * received some datagrams, where we record the 2407 * error to return on the next call or if the 2408 * app asks about it using getsockopt(SO_ERROR). 2409 */ 2410 sock->sk->sk_err = -err; 2411 } 2412 2413 return datagrams; 2414 } 2415 2416 return err; 2417 } 2418 2419 SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, 2420 unsigned int, vlen, unsigned int, flags, 2421 struct timespec __user *, timeout) 2422 { 2423 int datagrams; 2424 struct timespec timeout_sys; 2425 2426 if (flags & MSG_CMSG_COMPAT) 2427 return -EINVAL; 2428 2429 if (!timeout) 2430 return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); 2431 2432 if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) 2433 return -EFAULT; 2434 2435 datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); 2436 2437 if (datagrams > 0 && 2438 copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) 2439 datagrams = -EFAULT; 2440 2441 return datagrams; 2442 } 2443 2444 #ifdef __ARCH_WANT_SYS_SOCKETCALL 2445 /* Argument list sizes for sys_socketcall */ 2446 #define AL(x) ((x) * sizeof(unsigned long)) 2447 static const unsigned char nargs[21] = { 2448 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 2449 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 2450 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 2451 AL(4), AL(5), AL(4) 2452 }; 2453 2454 #undef AL 2455 2456 /* 2457 * System call vectors. 2458 * 2459 * Argument checking cleaned up. Saved 20% in size. 2460 * This function doesn't need to set the kernel lock because 2461 * it is set by the callees. 
 */

SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
	unsigned long a[AUDITSC_ARGS];
	unsigned long a0, a1;
	int err;
	unsigned int len;

	if (call < 1 || call > SYS_SENDMMSG)
		return -EINVAL;

	len = nargs[call];
	if (len > sizeof(a))
		return -EINVAL;

	/* copy_from_user should be SMP safe. */
	if (copy_from_user(a, args, len))
		return -EFAULT;

	err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
	if (err)
		return err;

	a0 = a[0];
	a1 = a[1];

	switch (call) {
	case SYS_SOCKET:
		err = sys_socket(a0, a1, a[2]);
		break;
	case SYS_BIND:
		err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_CONNECT:
		err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_LISTEN:
		err = sys_listen(a0, a1);
		break;
	case SYS_ACCEPT:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], 0);
		break;
	case SYS_GETSOCKNAME:
		err = sys_getsockname(a0, (struct sockaddr __user *)a1,
				      (int __user *)a[2]);
		break;
	case SYS_GETPEERNAME:
		err = sys_getpeername(a0, (struct sockaddr __user *)a1,
				      (int __user *)a[2]);
		break;
	case SYS_SOCKETPAIR:
		err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
		break;
	case SYS_SEND:
		err = sys_send(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_SENDTO:
		err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
				 (struct sockaddr __user *)a[4], a[5]);
		break;
	case SYS_RECV:
		err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_RECVFROM:
		err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
				   (struct sockaddr __user *)a[4],
				   (int __user *)a[5]);
		break;
	case SYS_SHUTDOWN:
		err = sys_shutdown(a0, a1);
		break;
	case SYS_SETSOCKOPT:
		err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
		break;
	case SYS_GETSOCKOPT:
		err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
				     (int __user *)a[4]);
		break;
	case SYS_SENDMSG:
		err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
		break;
	case SYS_SENDMMSG:
		err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
		break;
	case SYS_RECVMSG:
		err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
		break;
	case SYS_RECVMMSG:
		err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3],
				   (struct timespec __user *)a[4]);
		break;
	case SYS_ACCEPT4:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], a[3]);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

#endif				/* __ARCH_WANT_SYS_SOCKETCALL */

/**
 *	sock_register - add a socket protocol handler
 *	@ops: description of protocol
 *
 *	This function is called by a protocol handler that wants to
 *	advertise its address family, and have it linked into the
 *	socket interface. The value ops->family corresponds to the
 *	socket system call protocol family.
2579 */ 2580 int sock_register(const struct net_proto_family *ops) 2581 { 2582 int err; 2583 2584 if (ops->family >= NPROTO) { 2585 printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, 2586 NPROTO); 2587 return -ENOBUFS; 2588 } 2589 2590 spin_lock(&net_family_lock); 2591 if (rcu_dereference_protected(net_families[ops->family], 2592 lockdep_is_held(&net_family_lock))) 2593 err = -EEXIST; 2594 else { 2595 rcu_assign_pointer(net_families[ops->family], ops); 2596 err = 0; 2597 } 2598 spin_unlock(&net_family_lock); 2599 2600 printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); 2601 return err; 2602 } 2603 EXPORT_SYMBOL(sock_register); 2604 2605 /** 2606 * sock_unregister - remove a protocol handler 2607 * @family: protocol family to remove 2608 * 2609 * This function is called by a protocol handler that wants to 2610 * remove its address family, and have it unlinked from the 2611 * new socket creation. 2612 * 2613 * If protocol handler is a module, then it can use module reference 2614 * counts to protect against new references. If protocol handler is not 2615 * a module then it needs to provide its own protection in 2616 * the ops->create routine. 2617 */ 2618 void sock_unregister(int family) 2619 { 2620 BUG_ON(family < 0 || family >= NPROTO); 2621 2622 spin_lock(&net_family_lock); 2623 RCU_INIT_POINTER(net_families[family], NULL); 2624 spin_unlock(&net_family_lock); 2625 2626 synchronize_rcu(); 2627 2628 printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); 2629 } 2630 EXPORT_SYMBOL(sock_unregister); 2631 2632 static int __init sock_init(void) 2633 { 2634 int err; 2635 /* 2636 * Initialize the network sysctl infrastructure. 2637 */ 2638 err = net_sysctl_init(); 2639 if (err) 2640 goto out; 2641 2642 /* 2643 * Initialize skbuff SLAB cache 2644 */ 2645 skb_init(); 2646 2647 /* 2648 * Initialize the protocols module. 2649 */ 2650 2651 init_inodecache(); 2652 2653 err = register_filesystem(&sock_fs_type); 2654 if (err) 2655 goto out_fs; 2656 sock_mnt = kern_mount(&sock_fs_type); 2657 if (IS_ERR(sock_mnt)) { 2658 err = PTR_ERR(sock_mnt); 2659 goto out_mount; 2660 } 2661 2662 /* The real protocol initialization is performed in later initcalls. 2663 */ 2664 2665 #ifdef CONFIG_NETFILTER 2666 err = netfilter_init(); 2667 if (err) 2668 goto out; 2669 #endif 2670 2671 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2672 skb_timestamping_init(); 2673 #endif 2674 2675 out: 2676 return err; 2677 2678 out_mount: 2679 unregister_filesystem(&sock_fs_type); 2680 out_fs: 2681 goto out; 2682 } 2683 2684 core_initcall(sock_init); /* early initcall */ 2685 2686 #ifdef CONFIG_PROC_FS 2687 void socket_seq_show(struct seq_file *seq) 2688 { 2689 int cpu; 2690 int counter = 0; 2691 2692 for_each_possible_cpu(cpu) 2693 counter += per_cpu(sockets_in_use, cpu); 2694 2695 /* It can be negative, by the way. 
8) */ 2696 if (counter < 0) 2697 counter = 0; 2698 2699 seq_printf(seq, "sockets: used %d\n", counter); 2700 } 2701 #endif /* CONFIG_PROC_FS */ 2702 2703 #ifdef CONFIG_COMPAT 2704 static int do_siocgstamp(struct net *net, struct socket *sock, 2705 unsigned int cmd, void __user *up) 2706 { 2707 mm_segment_t old_fs = get_fs(); 2708 struct timeval ktv; 2709 int err; 2710 2711 set_fs(KERNEL_DS); 2712 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2713 set_fs(old_fs); 2714 if (!err) 2715 err = compat_put_timeval(&ktv, up); 2716 2717 return err; 2718 } 2719 2720 static int do_siocgstampns(struct net *net, struct socket *sock, 2721 unsigned int cmd, void __user *up) 2722 { 2723 mm_segment_t old_fs = get_fs(); 2724 struct timespec kts; 2725 int err; 2726 2727 set_fs(KERNEL_DS); 2728 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2729 set_fs(old_fs); 2730 if (!err) 2731 err = compat_put_timespec(&kts, up); 2732 2733 return err; 2734 } 2735 2736 static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) 2737 { 2738 struct ifreq __user *uifr; 2739 int err; 2740 2741 uifr = compat_alloc_user_space(sizeof(struct ifreq)); 2742 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2743 return -EFAULT; 2744 2745 err = dev_ioctl(net, SIOCGIFNAME, uifr); 2746 if (err) 2747 return err; 2748 2749 if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) 2750 return -EFAULT; 2751 2752 return 0; 2753 } 2754 2755 static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) 2756 { 2757 struct compat_ifconf ifc32; 2758 struct ifconf ifc; 2759 struct ifconf __user *uifc; 2760 struct compat_ifreq __user *ifr32; 2761 struct ifreq __user *ifr; 2762 unsigned int i, j; 2763 int err; 2764 2765 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) 2766 return -EFAULT; 2767 2768 memset(&ifc, 0, sizeof(ifc)); 2769 if (ifc32.ifcbuf == 0) { 2770 ifc32.ifc_len = 0; 2771 ifc.ifc_len = 0; 2772 ifc.ifc_req = NULL; 2773 uifc = compat_alloc_user_space(sizeof(struct ifconf)); 2774 } else { 2775 size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * 2776 sizeof(struct ifreq); 2777 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); 2778 ifc.ifc_len = len; 2779 ifr = ifc.ifc_req = (void __user *)(uifc + 1); 2780 ifr32 = compat_ptr(ifc32.ifcbuf); 2781 for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { 2782 if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) 2783 return -EFAULT; 2784 ifr++; 2785 ifr32++; 2786 } 2787 } 2788 if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) 2789 return -EFAULT; 2790 2791 err = dev_ioctl(net, SIOCGIFCONF, uifc); 2792 if (err) 2793 return err; 2794 2795 if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) 2796 return -EFAULT; 2797 2798 ifr = ifc.ifc_req; 2799 ifr32 = compat_ptr(ifc32.ifcbuf); 2800 for (i = 0, j = 0; 2801 i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; 2802 i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { 2803 if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) 2804 return -EFAULT; 2805 ifr32++; 2806 ifr++; 2807 } 2808 2809 if (ifc32.ifcbuf == 0) { 2810 /* Translate from 64-bit structure multiple to 2811 * a 32-bit one. 
2812 */ 2813 i = ifc.ifc_len; 2814 i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); 2815 ifc32.ifc_len = i; 2816 } else { 2817 ifc32.ifc_len = i; 2818 } 2819 if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) 2820 return -EFAULT; 2821 2822 return 0; 2823 } 2824 2825 static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) 2826 { 2827 struct compat_ethtool_rxnfc __user *compat_rxnfc; 2828 bool convert_in = false, convert_out = false; 2829 size_t buf_size = ALIGN(sizeof(struct ifreq), 8); 2830 struct ethtool_rxnfc __user *rxnfc; 2831 struct ifreq __user *ifr; 2832 u32 rule_cnt = 0, actual_rule_cnt; 2833 u32 ethcmd; 2834 u32 data; 2835 int ret; 2836 2837 if (get_user(data, &ifr32->ifr_ifru.ifru_data)) 2838 return -EFAULT; 2839 2840 compat_rxnfc = compat_ptr(data); 2841 2842 if (get_user(ethcmd, &compat_rxnfc->cmd)) 2843 return -EFAULT; 2844 2845 /* Most ethtool structures are defined without padding. 2846 * Unfortunately struct ethtool_rxnfc is an exception. 2847 */ 2848 switch (ethcmd) { 2849 default: 2850 break; 2851 case ETHTOOL_GRXCLSRLALL: 2852 /* Buffer size is variable */ 2853 if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) 2854 return -EFAULT; 2855 if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) 2856 return -ENOMEM; 2857 buf_size += rule_cnt * sizeof(u32); 2858 /* fall through */ 2859 case ETHTOOL_GRXRINGS: 2860 case ETHTOOL_GRXCLSRLCNT: 2861 case ETHTOOL_GRXCLSRULE: 2862 case ETHTOOL_SRXCLSRLINS: 2863 convert_out = true; 2864 /* fall through */ 2865 case ETHTOOL_SRXCLSRLDEL: 2866 buf_size += sizeof(struct ethtool_rxnfc); 2867 convert_in = true; 2868 break; 2869 } 2870 2871 ifr = compat_alloc_user_space(buf_size); 2872 rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); 2873 2874 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2875 return -EFAULT; 2876 2877 if (put_user(convert_in ? rxnfc : compat_ptr(data), 2878 &ifr->ifr_ifru.ifru_data)) 2879 return -EFAULT; 2880 2881 if (convert_in) { 2882 /* We expect there to be holes between fs.m_ext and 2883 * fs.ring_cookie and at the end of fs, but nowhere else. 
2884 */ 2885 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + 2886 sizeof(compat_rxnfc->fs.m_ext) != 2887 offsetof(struct ethtool_rxnfc, fs.m_ext) + 2888 sizeof(rxnfc->fs.m_ext)); 2889 BUILD_BUG_ON( 2890 offsetof(struct compat_ethtool_rxnfc, fs.location) - 2891 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 2892 offsetof(struct ethtool_rxnfc, fs.location) - 2893 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2894 2895 if (copy_in_user(rxnfc, compat_rxnfc, 2896 (void __user *)(&rxnfc->fs.m_ext + 1) - 2897 (void __user *)rxnfc) || 2898 copy_in_user(&rxnfc->fs.ring_cookie, 2899 &compat_rxnfc->fs.ring_cookie, 2900 (void __user *)(&rxnfc->fs.location + 1) - 2901 (void __user *)&rxnfc->fs.ring_cookie) || 2902 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, 2903 sizeof(rxnfc->rule_cnt))) 2904 return -EFAULT; 2905 } 2906 2907 ret = dev_ioctl(net, SIOCETHTOOL, ifr); 2908 if (ret) 2909 return ret; 2910 2911 if (convert_out) { 2912 if (copy_in_user(compat_rxnfc, rxnfc, 2913 (const void __user *)(&rxnfc->fs.m_ext + 1) - 2914 (const void __user *)rxnfc) || 2915 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2916 &rxnfc->fs.ring_cookie, 2917 (const void __user *)(&rxnfc->fs.location + 1) - 2918 (const void __user *)&rxnfc->fs.ring_cookie) || 2919 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, 2920 sizeof(rxnfc->rule_cnt))) 2921 return -EFAULT; 2922 2923 if (ethcmd == ETHTOOL_GRXCLSRLALL) { 2924 /* As an optimisation, we only copy the actual 2925 * number of rules that the underlying 2926 * function returned. Since Mallory might 2927 * change the rule count in user memory, we 2928 * check that it is less than the rule count 2929 * originally given (as the user buffer size), 2930 * which has been range-checked. 2931 */ 2932 if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) 2933 return -EFAULT; 2934 if (actual_rule_cnt < rule_cnt) 2935 rule_cnt = actual_rule_cnt; 2936 if (copy_in_user(&compat_rxnfc->rule_locs[0], 2937 &rxnfc->rule_locs[0], 2938 rule_cnt * sizeof(u32))) 2939 return -EFAULT; 2940 } 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) 2947 { 2948 void __user *uptr; 2949 compat_uptr_t uptr32; 2950 struct ifreq __user *uifr; 2951 2952 uifr = compat_alloc_user_space(sizeof(*uifr)); 2953 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2954 return -EFAULT; 2955 2956 if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) 2957 return -EFAULT; 2958 2959 uptr = compat_ptr(uptr32); 2960 2961 if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) 2962 return -EFAULT; 2963 2964 return dev_ioctl(net, SIOCWANDEV, uifr); 2965 } 2966 2967 static int bond_ioctl(struct net *net, unsigned int cmd, 2968 struct compat_ifreq __user *ifr32) 2969 { 2970 struct ifreq kifr; 2971 struct ifreq __user *uifr; 2972 mm_segment_t old_fs; 2973 int err; 2974 u32 data; 2975 void __user *datap; 2976 2977 switch (cmd) { 2978 case SIOCBONDENSLAVE: 2979 case SIOCBONDRELEASE: 2980 case SIOCBONDSETHWADDR: 2981 case SIOCBONDCHANGEACTIVE: 2982 if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) 2983 return -EFAULT; 2984 2985 old_fs = get_fs(); 2986 set_fs(KERNEL_DS); 2987 err = dev_ioctl(net, cmd, 2988 (struct ifreq __user __force *) &kifr); 2989 set_fs(old_fs); 2990 2991 return err; 2992 case SIOCBONDSLAVEINFOQUERY: 2993 case SIOCBONDINFOQUERY: 2994 uifr = compat_alloc_user_space(sizeof(*uifr)); 2995 if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2996 return -EFAULT; 2997 2998 if 
(get_user(data, &ifr32->ifr_ifru.ifru_data)) 2999 return -EFAULT; 3000 3001 datap = compat_ptr(data); 3002 if (put_user(datap, &uifr->ifr_ifru.ifru_data)) 3003 return -EFAULT; 3004 3005 return dev_ioctl(net, cmd, uifr); 3006 default: 3007 return -ENOIOCTLCMD; 3008 } 3009 } 3010 3011 static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, 3012 struct compat_ifreq __user *u_ifreq32) 3013 { 3014 struct ifreq __user *u_ifreq64; 3015 char tmp_buf[IFNAMSIZ]; 3016 void __user *data64; 3017 u32 data32; 3018 3019 if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), 3020 IFNAMSIZ)) 3021 return -EFAULT; 3022 if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) 3023 return -EFAULT; 3024 data64 = compat_ptr(data32); 3025 3026 u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); 3027 3028 /* Don't check these user accesses, just let that get trapped 3029 * in the ioctl handler instead. 3030 */ 3031 if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], 3032 IFNAMSIZ)) 3033 return -EFAULT; 3034 if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) 3035 return -EFAULT; 3036 3037 return dev_ioctl(net, cmd, u_ifreq64); 3038 } 3039 3040 static int dev_ifsioc(struct net *net, struct socket *sock, 3041 unsigned int cmd, struct compat_ifreq __user *uifr32) 3042 { 3043 struct ifreq __user *uifr; 3044 int err; 3045 3046 uifr = compat_alloc_user_space(sizeof(*uifr)); 3047 if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) 3048 return -EFAULT; 3049 3050 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); 3051 3052 if (!err) { 3053 switch (cmd) { 3054 case SIOCGIFFLAGS: 3055 case SIOCGIFMETRIC: 3056 case SIOCGIFMTU: 3057 case SIOCGIFMEM: 3058 case SIOCGIFHWADDR: 3059 case SIOCGIFINDEX: 3060 case SIOCGIFADDR: 3061 case SIOCGIFBRDADDR: 3062 case SIOCGIFDSTADDR: 3063 case SIOCGIFNETMASK: 3064 case SIOCGIFPFLAGS: 3065 case SIOCGIFTXQLEN: 3066 case SIOCGMIIPHY: 3067 case SIOCGMIIREG: 3068 if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) 3069 err = -EFAULT; 3070 break; 3071 } 3072 } 3073 return err; 3074 } 3075 3076 static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 3077 struct compat_ifreq __user *uifr32) 3078 { 3079 struct ifreq ifr; 3080 struct compat_ifmap __user *uifmap32; 3081 mm_segment_t old_fs; 3082 int err; 3083 3084 uifmap32 = &uifr32->ifr_ifru.ifru_map; 3085 err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); 3086 err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); 3087 err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); 3088 err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); 3089 err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); 3090 err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); 3091 err |= get_user(ifr.ifr_map.port, &uifmap32->port); 3092 if (err) 3093 return -EFAULT; 3094 3095 old_fs = get_fs(); 3096 set_fs(KERNEL_DS); 3097 err = dev_ioctl(net, cmd, (void __user __force *)&ifr); 3098 set_fs(old_fs); 3099 3100 if (cmd == SIOCGIFMAP && !err) { 3101 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); 3102 err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); 3103 err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); 3104 err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); 3105 err |= put_user(ifr.ifr_map.irq, &uifmap32->irq); 3106 err |= put_user(ifr.ifr_map.dma, &uifmap32->dma); 3107 err |= put_user(ifr.ifr_map.port, &uifmap32->port); 3108 if (err) 3109 err = -EFAULT; 3110 } 3111 return err; 3112 } 3113 3114 static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user 
*uifr32) 3115 { 3116 void __user *uptr; 3117 compat_uptr_t uptr32; 3118 struct ifreq __user *uifr; 3119 3120 uifr = compat_alloc_user_space(sizeof(*uifr)); 3121 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 3122 return -EFAULT; 3123 3124 if (get_user(uptr32, &uifr32->ifr_data)) 3125 return -EFAULT; 3126 3127 uptr = compat_ptr(uptr32); 3128 3129 if (put_user(uptr, &uifr->ifr_data)) 3130 return -EFAULT; 3131 3132 return dev_ioctl(net, SIOCSHWTSTAMP, uifr); 3133 } 3134 3135 struct rtentry32 { 3136 u32 rt_pad1; 3137 struct sockaddr rt_dst; /* target address */ 3138 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ 3139 struct sockaddr rt_genmask; /* target network mask (IP) */ 3140 unsigned short rt_flags; 3141 short rt_pad2; 3142 u32 rt_pad3; 3143 unsigned char rt_tos; 3144 unsigned char rt_class; 3145 short rt_pad4; 3146 short rt_metric; /* +1 for binary compatibility! */ 3147 /* char * */ u32 rt_dev; /* forcing the device at add */ 3148 u32 rt_mtu; /* per route MTU/Window */ 3149 u32 rt_window; /* Window clamping */ 3150 unsigned short rt_irtt; /* Initial RTT */ 3151 }; 3152 3153 struct in6_rtmsg32 { 3154 struct in6_addr rtmsg_dst; 3155 struct in6_addr rtmsg_src; 3156 struct in6_addr rtmsg_gateway; 3157 u32 rtmsg_type; 3158 u16 rtmsg_dst_len; 3159 u16 rtmsg_src_len; 3160 u32 rtmsg_metric; 3161 u32 rtmsg_info; 3162 u32 rtmsg_flags; 3163 s32 rtmsg_ifindex; 3164 }; 3165 3166 static int routing_ioctl(struct net *net, struct socket *sock, 3167 unsigned int cmd, void __user *argp) 3168 { 3169 int ret; 3170 void *r = NULL; 3171 struct in6_rtmsg r6; 3172 struct rtentry r4; 3173 char devname[16]; 3174 u32 rtdev; 3175 mm_segment_t old_fs = get_fs(); 3176 3177 if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ 3178 struct in6_rtmsg32 __user *ur6 = argp; 3179 ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3180 3 * sizeof(struct in6_addr)); 3181 ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); 3182 ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); 3183 ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); 3184 ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); 3185 ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); 3186 ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); 3187 ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); 3188 3189 r = (void *) &r6; 3190 } else { /* ipv4 */ 3191 struct rtentry32 __user *ur4 = argp; 3192 ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3193 3 * sizeof(struct sockaddr)); 3194 ret |= get_user(r4.rt_flags, &(ur4->rt_flags)); 3195 ret |= get_user(r4.rt_metric, &(ur4->rt_metric)); 3196 ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu)); 3197 ret |= get_user(r4.rt_window, &(ur4->rt_window)); 3198 ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt)); 3199 ret |= get_user(rtdev, &(ur4->rt_dev)); 3200 if (rtdev) { 3201 ret |= copy_from_user(devname, compat_ptr(rtdev), 15); 3202 r4.rt_dev = (char __user __force *)devname; 3203 devname[15] = 0; 3204 } else 3205 r4.rt_dev = NULL; 3206 3207 r = (void *) &r4; 3208 } 3209 3210 if (ret) { 3211 ret = -EFAULT; 3212 goto out; 3213 } 3214 3215 set_fs(KERNEL_DS); 3216 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); 3217 set_fs(old_fs); 3218 3219 out: 3220 return ret; 3221 } 3222 3223 /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE 3224 * for some operations; this forces use of the newer bridge-utils that 3225 * use compatible ioctls 3226 */ 3227 static int old_bridge_ioctl(compat_ulong_t __user *argp) 3228 { 3229 compat_ulong_t tmp; 3230 3231 if 
(get_user(tmp, argp)) 3232 return -EFAULT; 3233 if (tmp == BRCTL_GET_VERSION) 3234 return BRCTL_VERSION + 1; 3235 return -EINVAL; 3236 } 3237 3238 static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, 3239 unsigned int cmd, unsigned long arg) 3240 { 3241 void __user *argp = compat_ptr(arg); 3242 struct sock *sk = sock->sk; 3243 struct net *net = sock_net(sk); 3244 3245 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) 3246 return siocdevprivate_ioctl(net, cmd, argp); 3247 3248 switch (cmd) { 3249 case SIOCSIFBR: 3250 case SIOCGIFBR: 3251 return old_bridge_ioctl(argp); 3252 case SIOCGIFNAME: 3253 return dev_ifname32(net, argp); 3254 case SIOCGIFCONF: 3255 return dev_ifconf(net, argp); 3256 case SIOCETHTOOL: 3257 return ethtool_ioctl(net, argp); 3258 case SIOCWANDEV: 3259 return compat_siocwandev(net, argp); 3260 case SIOCGIFMAP: 3261 case SIOCSIFMAP: 3262 return compat_sioc_ifmap(net, cmd, argp); 3263 case SIOCBONDENSLAVE: 3264 case SIOCBONDRELEASE: 3265 case SIOCBONDSETHWADDR: 3266 case SIOCBONDSLAVEINFOQUERY: 3267 case SIOCBONDINFOQUERY: 3268 case SIOCBONDCHANGEACTIVE: 3269 return bond_ioctl(net, cmd, argp); 3270 case SIOCADDRT: 3271 case SIOCDELRT: 3272 return routing_ioctl(net, sock, cmd, argp); 3273 case SIOCGSTAMP: 3274 return do_siocgstamp(net, sock, cmd, argp); 3275 case SIOCGSTAMPNS: 3276 return do_siocgstampns(net, sock, cmd, argp); 3277 case SIOCSHWTSTAMP: 3278 return compat_siocshwtstamp(net, argp); 3279 3280 case FIOSETOWN: 3281 case SIOCSPGRP: 3282 case FIOGETOWN: 3283 case SIOCGPGRP: 3284 case SIOCBRADDBR: 3285 case SIOCBRDELBR: 3286 case SIOCGIFVLAN: 3287 case SIOCSIFVLAN: 3288 case SIOCADDDLCI: 3289 case SIOCDELDLCI: 3290 return sock_ioctl(file, cmd, arg); 3291 3292 case SIOCGIFFLAGS: 3293 case SIOCSIFFLAGS: 3294 case SIOCGIFMETRIC: 3295 case SIOCSIFMETRIC: 3296 case SIOCGIFMTU: 3297 case SIOCSIFMTU: 3298 case SIOCGIFMEM: 3299 case SIOCSIFMEM: 3300 case SIOCGIFHWADDR: 3301 case SIOCSIFHWADDR: 3302 case SIOCADDMULTI: 3303 case SIOCDELMULTI: 3304 case SIOCGIFINDEX: 3305 case SIOCGIFADDR: 3306 case SIOCSIFADDR: 3307 case SIOCSIFHWBROADCAST: 3308 case SIOCDIFADDR: 3309 case SIOCGIFBRDADDR: 3310 case SIOCSIFBRDADDR: 3311 case SIOCGIFDSTADDR: 3312 case SIOCSIFDSTADDR: 3313 case SIOCGIFNETMASK: 3314 case SIOCSIFNETMASK: 3315 case SIOCSIFPFLAGS: 3316 case SIOCGIFPFLAGS: 3317 case SIOCGIFTXQLEN: 3318 case SIOCSIFTXQLEN: 3319 case SIOCBRADDIF: 3320 case SIOCBRDELIF: 3321 case SIOCSIFNAME: 3322 case SIOCGMIIPHY: 3323 case SIOCGMIIREG: 3324 case SIOCSMIIREG: 3325 return dev_ifsioc(net, sock, cmd, argp); 3326 3327 case SIOCSARP: 3328 case SIOCGARP: 3329 case SIOCDARP: 3330 case SIOCATMARK: 3331 return sock_do_ioctl(net, sock, cmd, arg); 3332 } 3333 3334 return -ENOIOCTLCMD; 3335 } 3336 3337 static long compat_sock_ioctl(struct file *file, unsigned int cmd, 3338 unsigned long arg) 3339 { 3340 struct socket *sock = file->private_data; 3341 int ret = -ENOIOCTLCMD; 3342 struct sock *sk; 3343 struct net *net; 3344 3345 sk = sock->sk; 3346 net = sock_net(sk); 3347 3348 if (sock->ops->compat_ioctl) 3349 ret = sock->ops->compat_ioctl(sock, cmd, arg); 3350 3351 if (ret == -ENOIOCTLCMD && 3352 (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) 3353 ret = compat_wext_handle_ioctl(net, cmd, arg); 3354 3355 if (ret == -ENOIOCTLCMD) 3356 ret = compat_sock_ioctl_trans(file, sock, cmd, arg); 3357 3358 return ret; 3359 } 3360 #endif 3361 3362 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) 3363 { 3364 return sock->ops->bind(sock, addr, addrlen); 3365 } 3366 
EXPORT_SYMBOL(kernel_bind); 3367 3368 int kernel_listen(struct socket *sock, int backlog) 3369 { 3370 return sock->ops->listen(sock, backlog); 3371 } 3372 EXPORT_SYMBOL(kernel_listen); 3373 3374 int kernel_accept(struct socket *sock, struct socket **newsock, int flags) 3375 { 3376 struct sock *sk = sock->sk; 3377 int err; 3378 3379 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, 3380 newsock); 3381 if (err < 0) 3382 goto done; 3383 3384 err = sock->ops->accept(sock, *newsock, flags); 3385 if (err < 0) { 3386 sock_release(*newsock); 3387 *newsock = NULL; 3388 goto done; 3389 } 3390 3391 (*newsock)->ops = sock->ops; 3392 __module_get((*newsock)->ops->owner); 3393 3394 done: 3395 return err; 3396 } 3397 EXPORT_SYMBOL(kernel_accept); 3398 3399 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 3400 int flags) 3401 { 3402 return sock->ops->connect(sock, addr, addrlen, flags); 3403 } 3404 EXPORT_SYMBOL(kernel_connect); 3405 3406 int kernel_getsockname(struct socket *sock, struct sockaddr *addr, 3407 int *addrlen) 3408 { 3409 return sock->ops->getname(sock, addr, addrlen, 0); 3410 } 3411 EXPORT_SYMBOL(kernel_getsockname); 3412 3413 int kernel_getpeername(struct socket *sock, struct sockaddr *addr, 3414 int *addrlen) 3415 { 3416 return sock->ops->getname(sock, addr, addrlen, 1); 3417 } 3418 EXPORT_SYMBOL(kernel_getpeername); 3419 3420 int kernel_getsockopt(struct socket *sock, int level, int optname, 3421 char *optval, int *optlen) 3422 { 3423 mm_segment_t oldfs = get_fs(); 3424 char __user *uoptval; 3425 int __user *uoptlen; 3426 int err; 3427 3428 uoptval = (char __user __force *) optval; 3429 uoptlen = (int __user __force *) optlen; 3430 3431 set_fs(KERNEL_DS); 3432 if (level == SOL_SOCKET) 3433 err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); 3434 else 3435 err = sock->ops->getsockopt(sock, level, optname, uoptval, 3436 uoptlen); 3437 set_fs(oldfs); 3438 return err; 3439 } 3440 EXPORT_SYMBOL(kernel_getsockopt); 3441 3442 int kernel_setsockopt(struct socket *sock, int level, int optname, 3443 char *optval, unsigned int optlen) 3444 { 3445 mm_segment_t oldfs = get_fs(); 3446 char __user *uoptval; 3447 int err; 3448 3449 uoptval = (char __user __force *) optval; 3450 3451 set_fs(KERNEL_DS); 3452 if (level == SOL_SOCKET) 3453 err = sock_setsockopt(sock, level, optname, uoptval, optlen); 3454 else 3455 err = sock->ops->setsockopt(sock, level, optname, uoptval, 3456 optlen); 3457 set_fs(oldfs); 3458 return err; 3459 } 3460 EXPORT_SYMBOL(kernel_setsockopt); 3461 3462 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3463 size_t size, int flags) 3464 { 3465 if (sock->ops->sendpage) 3466 return sock->ops->sendpage(sock, page, offset, size, flags); 3467 3468 return sock_no_sendpage(sock, page, offset, size, flags); 3469 } 3470 EXPORT_SYMBOL(kernel_sendpage); 3471 3472 int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) 3473 { 3474 mm_segment_t oldfs = get_fs(); 3475 int err; 3476 3477 set_fs(KERNEL_DS); 3478 err = sock->ops->ioctl(sock, cmd, arg); 3479 set_fs(oldfs); 3480 3481 return err; 3482 } 3483 EXPORT_SYMBOL(kernel_sock_ioctl); 3484 3485 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) 3486 { 3487 return sock->ops->shutdown(sock, how); 3488 } 3489 EXPORT_SYMBOL(kernel_sock_shutdown); 3490
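
/*
 * Illustrative sketch (separate userspace program, not part of this file):
 * the getsockopt() comment earlier in this file notes that option lengths
 * are not known in advance, so the syscall takes optlen as a value-result
 * pointer that the protocol layer updates.  A minimal sketch; SO_RCVBUF is
 * only an arbitrary choice of option.
 */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;
	int val = 0;
	socklen_t len = sizeof(val);	/* in: buffer size, out: length written */

	if (fd < 0)
		return 1;

	/* setsockopt(): the caller knows the length and passes it by value. */
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	/* getsockopt(): the kernel fills in both the value and the length. */
	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) == 0)
		printf("SO_RCVBUF=%d (len=%u)\n", val, (unsigned int)len);
	return 0;
}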
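
/*
 * Illustrative sketch (separate userspace program, not part of this file):
 * ___sys_sendmsg() above copies the user control buffer (msg_control) into
 * a kernel buffer before the protocol layer sees it.  This is the matching
 * userspace side: building one control message with the CMSG_* helpers.
 * SCM_RIGHTS over AF_UNIX is chosen only as a familiar example.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd_to_pass)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {				/* forces correct cmsghdr alignment */
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(&u, 0, sizeof(u));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;	/* copied into a kernel buffer by sendmsg() */
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return sendmsg(sock, &msg, 0);
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;
	return send_fd(sv[0], 0) < 0;	/* pass stdin's descriptor across */
}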
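
/*
 * Illustrative sketch (separate userspace program, not part of this file):
 * the used_address bookkeeping in ___sys_sendmsg()/__sys_sendmmsg() above
 * exists because sendmmsg() callers typically push many datagrams at one
 * destination, and repeating the same msg_name lets the kernel skip the
 * LSM check after the first successful send.  The port number and the
 * payloads below are arbitrary.
 */
#define _GNU_SOURCE		/* sendmmsg() is Linux-specific (glibc >= 2.14) */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst;
	struct mmsghdr msgs[2];
	struct iovec iovs[2];
	char p0[] = "first", p1[] = "second";
	int i;

	if (fd < 0)
		return 1;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(9999);
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	memset(msgs, 0, sizeof(msgs));
	iovs[0].iov_base = p0;
	iovs[0].iov_len = sizeof(p0) - 1;
	iovs[1].iov_base = p1;
	iovs[1].iov_len = sizeof(p1) - 1;
	for (i = 0; i < 2; i++) {
		msgs[i].msg_hdr.msg_name = &dst;	/* same destination twice */
		msgs[i].msg_hdr.msg_namelen = sizeof(dst);
		msgs[i].msg_hdr.msg_iov = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Returns how many datagrams went out; msg_len is set per entry. */
	return sendmmsg(fd, msgs, 2, 0) < 0;
}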
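
/*
 * Illustrative sketch (separate userspace program, not part of this file):
 * how the MSG_WAITFORONE and timeout handling in __sys_recvmmsg() above
 * looks from userspace: block for the first datagram, then drain whatever
 * else is already queued.  The port and batch size are arbitrary, and the
 * timeout is only re-checked after each received datagram.
 */
#define _GNU_SOURCE		/* recvmmsg() is Linux-specific (glibc >= 2.12) */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <time.h>

#define VLEN	8
#define BUFSZ	1500

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	struct mmsghdr msgs[VLEN];
	struct iovec iovs[VLEN];
	static char bufs[VLEN][BUFSZ];
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int i, n;

	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(9999);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovs[i].iov_base = bufs[i];
		iovs[i].iov_len = BUFSZ;
		msgs[i].msg_hdr.msg_iov = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Wait for one datagram, then behave as if MSG_DONTWAIT were set. */
	n = recvmmsg(fd, msgs, VLEN, MSG_WAITFORONE, &timeout);
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return n < 0;
}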
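
/*
 * Illustrative sketch (separate userspace program, not part of this file):
 * on architectures that define __ARCH_WANT_SYS_SOCKETCALL (classic i386,
 * for instance), libc funnels socket(2) through the sys_socketcall()
 * multiplexor above, packing the arguments into an array whose length the
 * nargs[] table describes.  Calling it directly looks roughly like this;
 * mixing <linux/net.h> with <sys/socket.h> is assumed to be tolerable on
 * the target libc.
 */
#include <linux/net.h>		/* SYS_SOCKET and the other call numbers */
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
#ifdef __NR_socketcall
	/* socket(AF_INET, SOCK_DGRAM, 0): nargs[SYS_SOCKET] is AL(3). */
	unsigned long args[3] = { AF_INET, SOCK_DGRAM, 0 };

	return syscall(__NR_socketcall, SYS_SOCKET, args) < 0;
#else
	return 0;	/* this architecture wires up direct socket syscalls */
#endif
}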
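
/*
 * Illustrative sketch (separate module, not part of this file): the usual
 * shape of a sock_register()/sock_unregister() pair, as described in the
 * kerneldoc above.  The family number, the names and the empty create
 * handler are all hypothetical; a real handler allocates a struct sock
 * and points sock->ops at its proto_ops before returning 0.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/net.h>

#define AF_EXAMPLE	27	/* hypothetical family slot, must be < NPROTO */

static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	return -EPROTONOSUPPORT;	/* placeholder: no real protocol here */
}

static const struct net_proto_family example_family_ops = {
	.family	= AF_EXAMPLE,
	.create	= example_create,
	.owner	= THIS_MODULE,
};

static int __init example_init(void)
{
	/* sock_register() returns -EEXIST if the slot is already claimed. */
	return sock_register(&example_family_ops);
}

static void __exit example_exit(void)
{
	sock_unregister(AF_EXAMPLE);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");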
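
/*
 * Illustrative sketch (separate kernel code, not part of this file): how an
 * in-kernel service that needs a socket without a userspace fd might drive
 * the kernel_*() wrappers above to run a TCP listener.  The helper name and
 * the port number are made up for the sketch.
 */
#include <linux/in.h>
#include <linux/net.h>
#include <linux/string.h>
#include <net/sock.h>

static int example_kernel_listener(struct socket **client)
{
	struct socket *listener;
	struct sockaddr_in sin;
	int one = 1;
	int err;

	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &listener);
	if (err < 0)
		return err;

	/* kernel_setsockopt() switches to KERNEL_DS so kernel pointers work. */
	kernel_setsockopt(listener, SOL_SOCKET, SO_REUSEADDR,
			  (char *)&one, sizeof(one));

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(7777);

	err = kernel_bind(listener, (struct sockaddr *)&sin, sizeof(sin));
	if (err < 0)
		goto out_release;

	err = kernel_listen(listener, 5);
	if (err < 0)
		goto out_release;

	/* Blocks until a peer connects; *client gets its own struct socket. */
	err = kernel_accept(listener, client, 0);

out_release:
	sock_release(listener);
	return err;
}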