/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside
     of the device, but higher levels still should reserve
     dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header.  PPP does this, which is wrong because it
		 introduces asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
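/*
 * PACKET_SKB_CB overlays the generic skb->cb[] scratch area.  The
 * BUILD_BUG_ON() in packet_rcv() below verifies that the union plus a
 * worst-case hardware address still fits inside sizeof(skb->cb).
 * The receive paths stash here the address that recvmsg() later copies
 * out as msg_name, e.g.:
 *
 *	sll = &PACKET_SKB_CB(skb)->sa.ll;
 *	sll->sll_family = AF_PACKET;
 */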
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}
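/*
 * The tp_status word of each ring frame implements the kernel/user
 * ownership handshake: the kernel only fills frames marked
 * TP_STATUS_KERNEL, flips them to TP_STATUS_USER behind smp_wmb(),
 * and user space hands them back by storing TP_STATUS_KERNEL again.
 * A minimal sketch of the user-space side of an RX ring (illustrative
 * only, not part of this file; assumes `ring` was mmap()ed and
 * `frame_nr`/`frame_size` match the tpacket_req used for setup, and
 * `pfd` is a struct pollfd for the socket):
 *
 *	struct tpacket_hdr *hdr;
 *	unsigned int i = 0;
 *
 *	for (;;) {
 *		hdr = (void *)(ring + i * frame_size);
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			poll(&pfd, 1, -1);		// wait for the kernel
 *			continue;
 *		}
 *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;	// return the frame
 *		i = (i + 1) % frame_nr;
 *	}
 */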
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
{
#ifdef CONFIG_INET
	const struct iphdr *iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return skb;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return skb;
	if (!pskb_may_pull(skb, iph->ihl*4))
		return skb;
	iph = ip_hdr(skb);
	len = ntohs(iph->tot_len);
	if (skb->len < len || len < (iph->ihl * 4))
		return skb;

	if (ip_is_fragment(ip_hdr(skb))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
				return NULL;
			skb->rxhash = 0;
		}
	}
#endif
	return skb;
}
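/*
 * A note on fanout_demux_hash() above: mapping the 32-bit rxhash to a
 * socket index with ((u64)hash * num) >> 32 spreads the full hash range
 * evenly over [0, num) without a modulo.  For example, with num == 4 a
 * hash of 0x80000000 lands on index 2.  Packets of one flow share an
 * rxhash, so they always reach the same member socket.
 */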
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = fanout_check_defrag(skb);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
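/*
 * fanout_add() above is reached via setsockopt(PACKET_FANOUT); the group
 * id lives in the low 16 bits of the option value and the type/flags in
 * the high 16 bits.  A minimal user-space sketch (illustrative only;
 * `fd` is assumed to be a bound AF_PACKET socket and FANOUT_GROUP_ID an
 * arbitrary id shared by all group members):
 *
 *	int val = FANOUT_GROUP_ID |
 *		  ((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)))
 *		perror("PACKET_FANOUT");
 */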
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
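/*
 * packet_sendmsg_spkt() below requires the legacy SOCK_PACKET caller to
 * name the device and protocol in a struct sockaddr_pkt on every send.
 * A minimal user-space sketch (illustrative only; `fd` is assumed to be
 * a SOCK_PACKET socket and `frame`/`frame_len` a complete link-layer
 * frame built by the caller):
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	if (sendto(fd, frame, frame_len, 0,
 *		   (struct sockaddr *)&spkt, sizeof(spkt)) < 0)
 *		perror("sendto");
 */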
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function makes lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			 po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
		smp_wmb();
	}
#endif
	__packet_set_status(po, h.raw, status);

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	BUG_ON(skb == NULL);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
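/*
 * tpacket_snd() below is the kernel half of the PACKET_TX_RING protocol:
 * user space fills a frame, marks it TP_STATUS_SEND_REQUEST and calls
 * send(); tpacket_destruct_skb() above hands the slot back as
 * TP_STATUS_AVAILABLE once the driver has consumed it.  A minimal
 * user-space sketch (illustrative only; `ring`, `frame_size`, `slot`,
 * `frame`/`frame_len` and `fd` come from the caller's PACKET_TX_RING
 * setsockopt/mmap setup):
 *
 *	struct tpacket_hdr *hdr = (void *)(ring + slot * frame_size);
 *
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		memcpy((char *)hdr + TPACKET_HDRLEN -
 *		       sizeof(struct sockaddr_ll), frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);	// kick the kernel side
 *	}
 */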
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	int err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	if (need_rls_dev)
		dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		     vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;

		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	if (need_rls_dev)
		dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev && need_rls_dev)
		dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	fanout_release(sk);

	synchronize_net();
	/*
	 * Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
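/*
 * packet_do_bind()/packet_bind() below accept a struct sockaddr_ll whose
 * sll_protocol and sll_ifindex select what the protocol hook matches.
 * A minimal user-space sketch (illustrative only; `fd` is assumed to be
 * an AF_PACKET socket and the interface name arbitrary):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *		perror("bind");
 */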
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout)
		return -EINVAL;

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, true);
	po->num = protocol;
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;


	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
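/*
 * packet_create() below backs socket(AF_PACKET, ...) and insists on
 * CAP_NET_RAW.  A minimal user-space sketch (illustrative only; needs
 * root or CAP_NET_RAW to succeed):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	if (fd < 0)
 *		perror("socket");	// EPERM without CAP_NET_RAW
 */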
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}

static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't have to worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}
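/*
 * packet_mc_add() above and packet_mc_drop() below are driven by the
 * PACKET_ADD_MEMBERSHIP/PACKET_DROP_MEMBERSHIP socket options.  A minimal
 * user-space sketch that puts an interface into promiscuous mode through
 * this path (illustrative only; `fd` is assumed to be an AF_PACKET
 * socket):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)))
 *		perror("PACKET_ADD_MEMBERSHIP");
 */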
*/ 2177 kfree(i); 2178 goto done; 2179 } 2180 } 2181 2182 i->type = mreq->mr_type; 2183 i->ifindex = mreq->mr_ifindex; 2184 i->alen = mreq->mr_alen; 2185 memcpy(i->addr, mreq->mr_address, i->alen); 2186 i->count = 1; 2187 i->next = po->mclist; 2188 po->mclist = i; 2189 err = packet_dev_mc(dev, i, 1); 2190 if (err) { 2191 po->mclist = i->next; 2192 kfree(i); 2193 } 2194 2195 done: 2196 rtnl_unlock(); 2197 return err; 2198 } 2199 2200 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 2201 { 2202 struct packet_mclist *ml, **mlp; 2203 2204 rtnl_lock(); 2205 2206 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 2207 if (ml->ifindex == mreq->mr_ifindex && 2208 ml->type == mreq->mr_type && 2209 ml->alen == mreq->mr_alen && 2210 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 2211 if (--ml->count == 0) { 2212 struct net_device *dev; 2213 *mlp = ml->next; 2214 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 2215 if (dev) 2216 packet_dev_mc(dev, ml, -1); 2217 kfree(ml); 2218 } 2219 rtnl_unlock(); 2220 return 0; 2221 } 2222 } 2223 rtnl_unlock(); 2224 return -EADDRNOTAVAIL; 2225 } 2226 2227 static void packet_flush_mclist(struct sock *sk) 2228 { 2229 struct packet_sock *po = pkt_sk(sk); 2230 struct packet_mclist *ml; 2231 2232 if (!po->mclist) 2233 return; 2234 2235 rtnl_lock(); 2236 while ((ml = po->mclist) != NULL) { 2237 struct net_device *dev; 2238 2239 po->mclist = ml->next; 2240 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 2241 if (dev != NULL) 2242 packet_dev_mc(dev, ml, -1); 2243 kfree(ml); 2244 } 2245 rtnl_unlock(); 2246 } 2247 2248 static int 2249 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 2250 { 2251 struct sock *sk = sock->sk; 2252 struct packet_sock *po = pkt_sk(sk); 2253 int ret; 2254 2255 if (level != SOL_PACKET) 2256 return -ENOPROTOOPT; 2257 2258 switch (optname) { 2259 case PACKET_ADD_MEMBERSHIP: 2260 case PACKET_DROP_MEMBERSHIP: 2261 { 2262 struct packet_mreq_max mreq; 2263 int len = optlen; 2264 memset(&mreq, 0, sizeof(mreq)); 2265 if (len < sizeof(struct packet_mreq)) 2266 return -EINVAL; 2267 if (len > sizeof(mreq)) 2268 len = sizeof(mreq); 2269 if (copy_from_user(&mreq, optval, len)) 2270 return -EFAULT; 2271 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 2272 return -EINVAL; 2273 if (optname == PACKET_ADD_MEMBERSHIP) 2274 ret = packet_mc_add(sk, &mreq); 2275 else 2276 ret = packet_mc_drop(sk, &mreq); 2277 return ret; 2278 } 2279 2280 case PACKET_RX_RING: 2281 case PACKET_TX_RING: 2282 { 2283 struct tpacket_req req; 2284 2285 if (optlen < sizeof(req)) 2286 return -EINVAL; 2287 if (pkt_sk(sk)->has_vnet_hdr) 2288 return -EINVAL; 2289 if (copy_from_user(&req, optval, sizeof(req))) 2290 return -EFAULT; 2291 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 2292 } 2293 case PACKET_COPY_THRESH: 2294 { 2295 int val; 2296 2297 if (optlen != sizeof(val)) 2298 return -EINVAL; 2299 if (copy_from_user(&val, optval, sizeof(val))) 2300 return -EFAULT; 2301 2302 pkt_sk(sk)->copy_thresh = val; 2303 return 0; 2304 } 2305 case PACKET_VERSION: 2306 { 2307 int val; 2308 2309 if (optlen != sizeof(val)) 2310 return -EINVAL; 2311 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 2312 return -EBUSY; 2313 if (copy_from_user(&val, optval, sizeof(val))) 2314 return -EFAULT; 2315 switch (val) { 2316 case TPACKET_V1: 2317 case TPACKET_V2: 2318 po->tp_version = val; 2319 return 0; 2320 default: 2321 return -EINVAL; 2322 } 2323 } 2324 case 
PACKET_RESERVE: 2325 { 2326 unsigned int val; 2327 2328 if (optlen != sizeof(val)) 2329 return -EINVAL; 2330 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 2331 return -EBUSY; 2332 if (copy_from_user(&val, optval, sizeof(val))) 2333 return -EFAULT; 2334 po->tp_reserve = val; 2335 return 0; 2336 } 2337 case PACKET_LOSS: 2338 { 2339 unsigned int val; 2340 2341 if (optlen != sizeof(val)) 2342 return -EINVAL; 2343 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 2344 return -EBUSY; 2345 if (copy_from_user(&val, optval, sizeof(val))) 2346 return -EFAULT; 2347 po->tp_loss = !!val; 2348 return 0; 2349 } 2350 case PACKET_AUXDATA: 2351 { 2352 int val; 2353 2354 if (optlen < sizeof(val)) 2355 return -EINVAL; 2356 if (copy_from_user(&val, optval, sizeof(val))) 2357 return -EFAULT; 2358 2359 po->auxdata = !!val; 2360 return 0; 2361 } 2362 case PACKET_ORIGDEV: 2363 { 2364 int val; 2365 2366 if (optlen < sizeof(val)) 2367 return -EINVAL; 2368 if (copy_from_user(&val, optval, sizeof(val))) 2369 return -EFAULT; 2370 2371 po->origdev = !!val; 2372 return 0; 2373 } 2374 case PACKET_VNET_HDR: 2375 { 2376 int val; 2377 2378 if (sock->type != SOCK_RAW) 2379 return -EINVAL; 2380 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 2381 return -EBUSY; 2382 if (optlen < sizeof(val)) 2383 return -EINVAL; 2384 if (copy_from_user(&val, optval, sizeof(val))) 2385 return -EFAULT; 2386 2387 po->has_vnet_hdr = !!val; 2388 return 0; 2389 } 2390 case PACKET_TIMESTAMP: 2391 { 2392 int val; 2393 2394 if (optlen != sizeof(val)) 2395 return -EINVAL; 2396 if (copy_from_user(&val, optval, sizeof(val))) 2397 return -EFAULT; 2398 2399 po->tp_tstamp = val; 2400 return 0; 2401 } 2402 case PACKET_FANOUT: 2403 { 2404 int val; 2405 2406 if (optlen != sizeof(val)) 2407 return -EINVAL; 2408 if (copy_from_user(&val, optval, sizeof(val))) 2409 return -EFAULT; 2410 2411 return fanout_add(sk, val & 0xffff, val >> 16); 2412 } 2413 default: 2414 return -ENOPROTOOPT; 2415 } 2416 } 2417 2418 static int packet_getsockopt(struct socket *sock, int level, int optname, 2419 char __user *optval, int __user *optlen) 2420 { 2421 int len; 2422 int val; 2423 struct sock *sk = sock->sk; 2424 struct packet_sock *po = pkt_sk(sk); 2425 void *data; 2426 struct tpacket_stats st; 2427 2428 if (level != SOL_PACKET) 2429 return -ENOPROTOOPT; 2430 2431 if (get_user(len, optlen)) 2432 return -EFAULT; 2433 2434 if (len < 0) 2435 return -EINVAL; 2436 2437 switch (optname) { 2438 case PACKET_STATISTICS: 2439 if (len > sizeof(struct tpacket_stats)) 2440 len = sizeof(struct tpacket_stats); 2441 spin_lock_bh(&sk->sk_receive_queue.lock); 2442 st = po->stats; 2443 memset(&po->stats, 0, sizeof(st)); 2444 spin_unlock_bh(&sk->sk_receive_queue.lock); 2445 st.tp_packets += st.tp_drops; 2446 2447 data = &st; 2448 break; 2449 case PACKET_AUXDATA: 2450 if (len > sizeof(int)) 2451 len = sizeof(int); 2452 val = po->auxdata; 2453 2454 data = &val; 2455 break; 2456 case PACKET_ORIGDEV: 2457 if (len > sizeof(int)) 2458 len = sizeof(int); 2459 val = po->origdev; 2460 2461 data = &val; 2462 break; 2463 case PACKET_VNET_HDR: 2464 if (len > sizeof(int)) 2465 len = sizeof(int); 2466 val = po->has_vnet_hdr; 2467 2468 data = &val; 2469 break; 2470 case PACKET_VERSION: 2471 if (len > sizeof(int)) 2472 len = sizeof(int); 2473 val = po->tp_version; 2474 data = &val; 2475 break; 2476 case PACKET_HDRLEN: 2477 if (len > sizeof(int)) 2478 len = sizeof(int); 2479 if (copy_from_user(&val, optval, len)) 2480 return -EFAULT; 2481 switch (val) { 2482 case TPACKET_V1: 2483 val = sizeof(struct tpacket_hdr); 2484 
break; 2485 case TPACKET_V2: 2486 val = sizeof(struct tpacket2_hdr); 2487 break; 2488 default: 2489 return -EINVAL; 2490 } 2491 data = &val; 2492 break; 2493 case PACKET_RESERVE: 2494 if (len > sizeof(unsigned int)) 2495 len = sizeof(unsigned int); 2496 val = po->tp_reserve; 2497 data = &val; 2498 break; 2499 case PACKET_LOSS: 2500 if (len > sizeof(unsigned int)) 2501 len = sizeof(unsigned int); 2502 val = po->tp_loss; 2503 data = &val; 2504 break; 2505 case PACKET_TIMESTAMP: 2506 if (len > sizeof(int)) 2507 len = sizeof(int); 2508 val = po->tp_tstamp; 2509 data = &val; 2510 break; 2511 case PACKET_FANOUT: 2512 if (len > sizeof(int)) 2513 len = sizeof(int); 2514 val = (po->fanout ? 2515 ((u32)po->fanout->id | 2516 ((u32)po->fanout->type << 16)) : 2517 0); 2518 data = &val; 2519 break; 2520 default: 2521 return -ENOPROTOOPT; 2522 } 2523 2524 if (put_user(len, optlen)) 2525 return -EFAULT; 2526 if (copy_to_user(optval, data, len)) 2527 return -EFAULT; 2528 return 0; 2529 } 2530 2531 2532 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) 2533 { 2534 struct sock *sk; 2535 struct hlist_node *node; 2536 struct net_device *dev = data; 2537 struct net *net = dev_net(dev); 2538 2539 rcu_read_lock(); 2540 sk_for_each_rcu(sk, node, &net->packet.sklist) { 2541 struct packet_sock *po = pkt_sk(sk); 2542 2543 switch (msg) { 2544 case NETDEV_UNREGISTER: 2545 if (po->mclist) 2546 packet_dev_mclist(dev, po->mclist, -1); 2547 /* fallthrough */ 2548 2549 case NETDEV_DOWN: 2550 if (dev->ifindex == po->ifindex) { 2551 spin_lock(&po->bind_lock); 2552 if (po->running) { 2553 __unregister_prot_hook(sk, false); 2554 sk->sk_err = ENETDOWN; 2555 if (!sock_flag(sk, SOCK_DEAD)) 2556 sk->sk_error_report(sk); 2557 } 2558 if (msg == NETDEV_UNREGISTER) { 2559 po->ifindex = -1; 2560 if (po->prot_hook.dev) 2561 dev_put(po->prot_hook.dev); 2562 po->prot_hook.dev = NULL; 2563 } 2564 spin_unlock(&po->bind_lock); 2565 } 2566 break; 2567 case NETDEV_UP: 2568 if (dev->ifindex == po->ifindex) { 2569 spin_lock(&po->bind_lock); 2570 if (po->num) 2571 register_prot_hook(sk); 2572 spin_unlock(&po->bind_lock); 2573 } 2574 break; 2575 } 2576 } 2577 rcu_read_unlock(); 2578 return NOTIFY_DONE; 2579 } 2580 2581 2582 static int packet_ioctl(struct socket *sock, unsigned int cmd, 2583 unsigned long arg) 2584 { 2585 struct sock *sk = sock->sk; 2586 2587 switch (cmd) { 2588 case SIOCOUTQ: 2589 { 2590 int amount = sk_wmem_alloc_get(sk); 2591 2592 return put_user(amount, (int __user *)arg); 2593 } 2594 case SIOCINQ: 2595 { 2596 struct sk_buff *skb; 2597 int amount = 0; 2598 2599 spin_lock_bh(&sk->sk_receive_queue.lock); 2600 skb = skb_peek(&sk->sk_receive_queue); 2601 if (skb) 2602 amount = skb->len; 2603 spin_unlock_bh(&sk->sk_receive_queue.lock); 2604 return put_user(amount, (int __user *)arg); 2605 } 2606 case SIOCGSTAMP: 2607 return sock_get_timestamp(sk, (struct timeval __user *)arg); 2608 case SIOCGSTAMPNS: 2609 return sock_get_timestampns(sk, (struct timespec __user *)arg); 2610 2611 #ifdef CONFIG_INET 2612 case SIOCADDRT: 2613 case SIOCDELRT: 2614 case SIOCDARP: 2615 case SIOCGARP: 2616 case SIOCSARP: 2617 case SIOCGIFADDR: 2618 case SIOCSIFADDR: 2619 case SIOCGIFBRDADDR: 2620 case SIOCSIFBRDADDR: 2621 case SIOCGIFNETMASK: 2622 case SIOCSIFNETMASK: 2623 case SIOCGIFDSTADDR: 2624 case SIOCSIFDSTADDR: 2625 case SIOCSIFFLAGS: 2626 return inet_dgram_ops.ioctl(sock, cmd, arg); 2627 #endif 2628 2629 default: 2630 return -ENOIOCTLCMD; 2631 } 2632 return 0; 2633 } 2634 2635 static unsigned int 
packet_poll(struct file *file, struct socket *sock,
2636 poll_table *wait)
2637 {
2638 struct sock *sk = sock->sk;
2639 struct packet_sock *po = pkt_sk(sk);
2640 unsigned int mask = datagram_poll(file, sock, wait);
2641
2642 spin_lock_bh(&sk->sk_receive_queue.lock);
2643 if (po->rx_ring.pg_vec) {
2644 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
2645 mask |= POLLIN | POLLRDNORM;
2646 }
2647 spin_unlock_bh(&sk->sk_receive_queue.lock);
2648 spin_lock_bh(&sk->sk_write_queue.lock);
2649 if (po->tx_ring.pg_vec) {
2650 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2651 mask |= POLLOUT | POLLWRNORM;
2652 }
2653 spin_unlock_bh(&sk->sk_write_queue.lock);
2654 return mask;
2655 }
2656
2657
2658 /* Dirty? Well, I still have not learned a better way to account
2659 * for user mmaps.
2660 */
2661
2662 static void packet_mm_open(struct vm_area_struct *vma)
2663 {
2664 struct file *file = vma->vm_file;
2665 struct socket *sock = file->private_data;
2666 struct sock *sk = sock->sk;
2667
2668 if (sk)
2669 atomic_inc(&pkt_sk(sk)->mapped);
2670 }
2671
2672 static void packet_mm_close(struct vm_area_struct *vma)
2673 {
2674 struct file *file = vma->vm_file;
2675 struct socket *sock = file->private_data;
2676 struct sock *sk = sock->sk;
2677
2678 if (sk)
2679 atomic_dec(&pkt_sk(sk)->mapped);
2680 }
2681
2682 static const struct vm_operations_struct packet_mmap_ops = {
2683 .open = packet_mm_open,
2684 .close = packet_mm_close,
2685 };
2686
2687 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
2688 unsigned int len)
2689 {
2690 int i;
2691
2692 for (i = 0; i < len; i++) {
2693 if (likely(pg_vec[i].buffer)) {
2694 if (is_vmalloc_addr(pg_vec[i].buffer))
2695 vfree(pg_vec[i].buffer);
2696 else
2697 free_pages((unsigned long)pg_vec[i].buffer,
2698 order);
2699 pg_vec[i].buffer = NULL;
2700 }
2701 }
2702 kfree(pg_vec);
2703 }
2704
2705 static inline char *alloc_one_pg_vec_page(unsigned long order)
2706 {
2707 char *buffer = NULL;
2708 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
2709 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
2710
2711 buffer = (char *) __get_free_pages(gfp_flags, order);
2712
2713 if (buffer)
2714 return buffer;
2715
2716 /*
2717 * __get_free_pages failed, fall back to vmalloc
2718 */
2719 buffer = vzalloc((1 << order) * PAGE_SIZE);
2720
2721 if (buffer)
2722 return buffer;
2723
2724 /*
2725 * vmalloc failed, let's dig into swap here
2726 */
2727 gfp_flags &= ~__GFP_NORETRY;
2728 buffer = (char *)__get_free_pages(gfp_flags, order);
2729 if (buffer)
2730 return buffer;
2731
2732 /*
2733 * complete and utter failure
2734 */
2735 return NULL;
2736 }
2737
2738 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
2739 {
2740 unsigned int block_nr = req->tp_block_nr;
2741 struct pgv *pg_vec;
2742 int i;
2743
2744 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
2745 if (unlikely(!pg_vec))
2746 goto out;
2747
2748 for (i = 0; i < block_nr; i++) {
2749 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
2750 if (unlikely(!pg_vec[i].buffer))
2751 goto out_free_pgvec;
2752 }
2753
2754 out:
2755 return pg_vec;
2756
2757 out_free_pgvec:
2758 free_pg_vec(pg_vec, order, block_nr);
2759 pg_vec = NULL;
2760 goto out;
2761 }
2762
2763 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2764 int closing, int tx_ring)
2765 {
2766 struct pgv *pg_vec = NULL;
2767 struct packet_sock *po = pkt_sk(sk);
2768 int was_running, order = 0;
2769 struct packet_ring_buffer *rb;
2770 struct sk_buff_head *rb_queue;
2771 __be16 num;
2772 int err;
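/*
 * Illustrative sketch (not kernel code): the checks below enforce the
 * tpacket_req layout contract -- tp_block_size must be a multiple of
 * PAGE_SIZE, tp_frame_size a multiple of TPACKET_ALIGNMENT and large
 * enough for the frame header plus tp_reserve, and tp_frame_nr must
 * equal frames_per_block * tp_block_nr exactly. A user-space request
 * that satisfies these rules, assuming 4 KiB pages (the sizes are only
 * an example, not requirements):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	(a PAGE_SIZE multiple)
 *		.tp_frame_size = 2048,	(a TPACKET_ALIGNMENT multiple)
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,	((4096 / 2048) * 64)
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */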
2773 2774 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 2775 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 2776 2777 err = -EBUSY; 2778 if (!closing) { 2779 if (atomic_read(&po->mapped)) 2780 goto out; 2781 if (atomic_read(&rb->pending)) 2782 goto out; 2783 } 2784 2785 if (req->tp_block_nr) { 2786 /* Sanity tests and some calculations */ 2787 err = -EBUSY; 2788 if (unlikely(rb->pg_vec)) 2789 goto out; 2790 2791 switch (po->tp_version) { 2792 case TPACKET_V1: 2793 po->tp_hdrlen = TPACKET_HDRLEN; 2794 break; 2795 case TPACKET_V2: 2796 po->tp_hdrlen = TPACKET2_HDRLEN; 2797 break; 2798 } 2799 2800 err = -EINVAL; 2801 if (unlikely((int)req->tp_block_size <= 0)) 2802 goto out; 2803 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 2804 goto out; 2805 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 2806 po->tp_reserve)) 2807 goto out; 2808 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 2809 goto out; 2810 2811 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; 2812 if (unlikely(rb->frames_per_block <= 0)) 2813 goto out; 2814 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 2815 req->tp_frame_nr)) 2816 goto out; 2817 2818 err = -ENOMEM; 2819 order = get_order(req->tp_block_size); 2820 pg_vec = alloc_pg_vec(req, order); 2821 if (unlikely(!pg_vec)) 2822 goto out; 2823 } 2824 /* Done */ 2825 else { 2826 err = -EINVAL; 2827 if (unlikely(req->tp_frame_nr)) 2828 goto out; 2829 } 2830 2831 lock_sock(sk); 2832 2833 /* Detach socket from network */ 2834 spin_lock(&po->bind_lock); 2835 was_running = po->running; 2836 num = po->num; 2837 if (was_running) { 2838 po->num = 0; 2839 __unregister_prot_hook(sk, false); 2840 } 2841 spin_unlock(&po->bind_lock); 2842 2843 synchronize_net(); 2844 2845 err = -EBUSY; 2846 mutex_lock(&po->pg_vec_lock); 2847 if (closing || atomic_read(&po->mapped) == 0) { 2848 err = 0; 2849 spin_lock_bh(&rb_queue->lock); 2850 swap(rb->pg_vec, pg_vec); 2851 rb->frame_max = (req->tp_frame_nr - 1); 2852 rb->head = 0; 2853 rb->frame_size = req->tp_frame_size; 2854 spin_unlock_bh(&rb_queue->lock); 2855 2856 swap(rb->pg_vec_order, order); 2857 swap(rb->pg_vec_len, req->tp_block_nr); 2858 2859 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 2860 po->prot_hook.func = (po->rx_ring.pg_vec) ? 
2861 tpacket_rcv : packet_rcv; 2862 skb_queue_purge(rb_queue); 2863 if (atomic_read(&po->mapped)) 2864 pr_err("packet_mmap: vma is busy: %d\n", 2865 atomic_read(&po->mapped)); 2866 } 2867 mutex_unlock(&po->pg_vec_lock); 2868 2869 spin_lock(&po->bind_lock); 2870 if (was_running) { 2871 po->num = num; 2872 register_prot_hook(sk); 2873 } 2874 spin_unlock(&po->bind_lock); 2875 2876 release_sock(sk); 2877 2878 if (pg_vec) 2879 free_pg_vec(pg_vec, order, req->tp_block_nr); 2880 out: 2881 return err; 2882 } 2883 2884 static int packet_mmap(struct file *file, struct socket *sock, 2885 struct vm_area_struct *vma) 2886 { 2887 struct sock *sk = sock->sk; 2888 struct packet_sock *po = pkt_sk(sk); 2889 unsigned long size, expected_size; 2890 struct packet_ring_buffer *rb; 2891 unsigned long start; 2892 int err = -EINVAL; 2893 int i; 2894 2895 if (vma->vm_pgoff) 2896 return -EINVAL; 2897 2898 mutex_lock(&po->pg_vec_lock); 2899 2900 expected_size = 0; 2901 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 2902 if (rb->pg_vec) { 2903 expected_size += rb->pg_vec_len 2904 * rb->pg_vec_pages 2905 * PAGE_SIZE; 2906 } 2907 } 2908 2909 if (expected_size == 0) 2910 goto out; 2911 2912 size = vma->vm_end - vma->vm_start; 2913 if (size != expected_size) 2914 goto out; 2915 2916 start = vma->vm_start; 2917 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 2918 if (rb->pg_vec == NULL) 2919 continue; 2920 2921 for (i = 0; i < rb->pg_vec_len; i++) { 2922 struct page *page; 2923 void *kaddr = rb->pg_vec[i].buffer; 2924 int pg_num; 2925 2926 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { 2927 page = pgv_to_page(kaddr); 2928 err = vm_insert_page(vma, start, page); 2929 if (unlikely(err)) 2930 goto out; 2931 start += PAGE_SIZE; 2932 kaddr += PAGE_SIZE; 2933 } 2934 } 2935 } 2936 2937 atomic_inc(&po->mapped); 2938 vma->vm_ops = &packet_mmap_ops; 2939 err = 0; 2940 2941 out: 2942 mutex_unlock(&po->pg_vec_lock); 2943 return err; 2944 } 2945 2946 static const struct proto_ops packet_ops_spkt = { 2947 .family = PF_PACKET, 2948 .owner = THIS_MODULE, 2949 .release = packet_release, 2950 .bind = packet_bind_spkt, 2951 .connect = sock_no_connect, 2952 .socketpair = sock_no_socketpair, 2953 .accept = sock_no_accept, 2954 .getname = packet_getname_spkt, 2955 .poll = datagram_poll, 2956 .ioctl = packet_ioctl, 2957 .listen = sock_no_listen, 2958 .shutdown = sock_no_shutdown, 2959 .setsockopt = sock_no_setsockopt, 2960 .getsockopt = sock_no_getsockopt, 2961 .sendmsg = packet_sendmsg_spkt, 2962 .recvmsg = packet_recvmsg, 2963 .mmap = sock_no_mmap, 2964 .sendpage = sock_no_sendpage, 2965 }; 2966 2967 static const struct proto_ops packet_ops = { 2968 .family = PF_PACKET, 2969 .owner = THIS_MODULE, 2970 .release = packet_release, 2971 .bind = packet_bind, 2972 .connect = sock_no_connect, 2973 .socketpair = sock_no_socketpair, 2974 .accept = sock_no_accept, 2975 .getname = packet_getname, 2976 .poll = packet_poll, 2977 .ioctl = packet_ioctl, 2978 .listen = sock_no_listen, 2979 .shutdown = sock_no_shutdown, 2980 .setsockopt = packet_setsockopt, 2981 .getsockopt = packet_getsockopt, 2982 .sendmsg = packet_sendmsg, 2983 .recvmsg = packet_recvmsg, 2984 .mmap = packet_mmap, 2985 .sendpage = sock_no_sendpage, 2986 }; 2987 2988 static const struct net_proto_family packet_family_ops = { 2989 .family = PF_PACKET, 2990 .create = packet_create, 2991 .owner = THIS_MODULE, 2992 }; 2993 2994 static struct notifier_block packet_netdev_notifier = { 2995 .notifier_call = packet_notifier, 2996 }; 2997 2998 #ifdef CONFIG_PROC_FS 2999 3000 static 
void *packet_seq_start(struct seq_file *seq, loff_t *pos) 3001 __acquires(RCU) 3002 { 3003 struct net *net = seq_file_net(seq); 3004 3005 rcu_read_lock(); 3006 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); 3007 } 3008 3009 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3010 { 3011 struct net *net = seq_file_net(seq); 3012 return seq_hlist_next_rcu(v, &net->packet.sklist, pos); 3013 } 3014 3015 static void packet_seq_stop(struct seq_file *seq, void *v) 3016 __releases(RCU) 3017 { 3018 rcu_read_unlock(); 3019 } 3020 3021 static int packet_seq_show(struct seq_file *seq, void *v) 3022 { 3023 if (v == SEQ_START_TOKEN) 3024 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 3025 else { 3026 struct sock *s = sk_entry(v); 3027 const struct packet_sock *po = pkt_sk(s); 3028 3029 seq_printf(seq, 3030 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", 3031 s, 3032 atomic_read(&s->sk_refcnt), 3033 s->sk_type, 3034 ntohs(po->num), 3035 po->ifindex, 3036 po->running, 3037 atomic_read(&s->sk_rmem_alloc), 3038 sock_i_uid(s), 3039 sock_i_ino(s)); 3040 } 3041 3042 return 0; 3043 } 3044 3045 static const struct seq_operations packet_seq_ops = { 3046 .start = packet_seq_start, 3047 .next = packet_seq_next, 3048 .stop = packet_seq_stop, 3049 .show = packet_seq_show, 3050 }; 3051 3052 static int packet_seq_open(struct inode *inode, struct file *file) 3053 { 3054 return seq_open_net(inode, file, &packet_seq_ops, 3055 sizeof(struct seq_net_private)); 3056 } 3057 3058 static const struct file_operations packet_seq_fops = { 3059 .owner = THIS_MODULE, 3060 .open = packet_seq_open, 3061 .read = seq_read, 3062 .llseek = seq_lseek, 3063 .release = seq_release_net, 3064 }; 3065 3066 #endif 3067 3068 static int __net_init packet_net_init(struct net *net) 3069 { 3070 spin_lock_init(&net->packet.sklist_lock); 3071 INIT_HLIST_HEAD(&net->packet.sklist); 3072 3073 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 3074 return -ENOMEM; 3075 3076 return 0; 3077 } 3078 3079 static void __net_exit packet_net_exit(struct net *net) 3080 { 3081 proc_net_remove(net, "packet"); 3082 } 3083 3084 static struct pernet_operations packet_net_ops = { 3085 .init = packet_net_init, 3086 .exit = packet_net_exit, 3087 }; 3088 3089 3090 static void __exit packet_exit(void) 3091 { 3092 unregister_netdevice_notifier(&packet_netdev_notifier); 3093 unregister_pernet_subsys(&packet_net_ops); 3094 sock_unregister(PF_PACKET); 3095 proto_unregister(&packet_proto); 3096 } 3097 3098 static int __init packet_init(void) 3099 { 3100 int rc = proto_register(&packet_proto, 0); 3101 3102 if (rc != 0) 3103 goto out; 3104 3105 sock_register(&packet_family_ops); 3106 register_pernet_subsys(&packet_net_ops); 3107 register_netdevice_notifier(&packet_netdev_notifier); 3108 out: 3109 return rc; 3110 } 3111 3112 module_init(packet_init); 3113 module_exit(packet_exit); 3114 MODULE_LICENSE("GPL"); 3115 MODULE_ALIAS_NETPROTO(PF_PACKET); 3116
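/*
 * Illustrative sketch (not part of this file): minimal user-space use
 * of the interface implemented above -- open a raw packet socket, bind
 * it to one interface and read a single frame. Error handling is
 * omitted and "eth0" is only an example device name.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *
 *	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 */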