1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * PACKET - implements raw packet sockets. 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org> 11 * 12 * Fixes: 13 * Alan Cox : verify_area() now used correctly 14 * Alan Cox : new skbuff lists, look ma no backlogs! 15 * Alan Cox : tidied skbuff lists. 16 * Alan Cox : Now uses generic datagram routines I 17 * added. Also fixed the peek/read crash 18 * from all old Linux datagram code. 19 * Alan Cox : Uses the improved datagram code. 20 * Alan Cox : Added NULL's for socket options. 21 * Alan Cox : Re-commented the code. 22 * Alan Cox : Use new kernel side addressing 23 * Rob Janssen : Correct MTU usage. 24 * Dave Platt : Counter leaks caused by incorrect 25 * interrupt locking and some slightly 26 * dubious gcc output. Can you read 27 * compiler: it said _VOLATILE_ 28 * Richard Kooijman : Timestamp fixes. 29 * Alan Cox : New buffers. Use sk->mac.raw. 30 * Alan Cox : sendmsg/recvmsg support. 31 * Alan Cox : Protocol setting support 32 * Alexey Kuznetsov : Untied from IPv4 stack. 33 * Cyrus Durgin : Fixed kerneld for kmod. 34 * Michal Ostrowski : Module initialization cleanup. 35 * Ulises Alonso : Frame number limit removal and 36 * packet_set_ring memory leak. 37 * Eric Biederman : Allow for > 8 byte hardware addresses. 38 * The convention is that longer addresses 39 * will simply extend the hardware address 40 * byte arrays at the end of sockaddr_ll 41 * and packet_mreq. 42 * Johann Baudy : Added TX RING. 43 * Chetan Loke : Implemented TPACKET_V3 block abstraction 44 * layer. 45 * Copyright (C) 2011, <lokec@ccs.neu.edu> 46 * 47 * 48 * This program is free software; you can redistribute it and/or 49 * modify it under the terms of the GNU General Public License 50 * as published by the Free Software Foundation; either version 51 * 2 of the License, or (at your option) any later version. 52 * 53 */ 54 55 #include <linux/types.h> 56 #include <linux/mm.h> 57 #include <linux/capability.h> 58 #include <linux/fcntl.h> 59 #include <linux/socket.h> 60 #include <linux/in.h> 61 #include <linux/inet.h> 62 #include <linux/netdevice.h> 63 #include <linux/if_packet.h> 64 #include <linux/wireless.h> 65 #include <linux/kernel.h> 66 #include <linux/kmod.h> 67 #include <linux/slab.h> 68 #include <linux/vmalloc.h> 69 #include <net/net_namespace.h> 70 #include <net/ip.h> 71 #include <net/protocol.h> 72 #include <linux/skbuff.h> 73 #include <net/sock.h> 74 #include <linux/errno.h> 75 #include <linux/timer.h> 76 #include <linux/uaccess.h> 77 #include <asm/ioctls.h> 78 #include <asm/page.h> 79 #include <asm/cacheflush.h> 80 #include <asm/io.h> 81 #include <linux/proc_fs.h> 82 #include <linux/seq_file.h> 83 #include <linux/poll.h> 84 #include <linux/module.h> 85 #include <linux/init.h> 86 #include <linux/mutex.h> 87 #include <linux/if_vlan.h> 88 #include <linux/virtio_net.h> 89 #include <linux/errqueue.h> 90 #include <linux/net_tstamp.h> 91 #include <linux/percpu.h> 92 #ifdef CONFIG_INET 93 #include <net/inet_common.h> 94 #endif 95 #include <linux/bpf.h> 96 #include <net/compat.h> 97 98 #include "internal.h" 99 100 /* 101 Assumptions: 102 - if device has no dev->hard_header routine, it adds and removes ll header 103 inside itself. 
In this case ll header is invisible outside of device, 104 but higher levels still should reserve dev->hard_header_len. 105 Some devices are enough clever to reallocate skb, when header 106 will not fit to reserved space (tunnel), another ones are silly 107 (PPP). 108 - packet socket receives packets with pulled ll header, 109 so that SOCK_RAW should push it back. 110 111 On receive: 112 ----------- 113 114 Incoming, dev->hard_header!=NULL 115 mac_header -> ll header 116 data -> data 117 118 Outgoing, dev->hard_header!=NULL 119 mac_header -> ll header 120 data -> ll header 121 122 Incoming, dev->hard_header==NULL 123 mac_header -> UNKNOWN position. It is very likely, that it points to ll 124 header. PPP makes it, that is wrong, because introduce 125 assymetry between rx and tx paths. 126 data -> data 127 128 Outgoing, dev->hard_header==NULL 129 mac_header -> data. ll header is still not built! 130 data -> data 131 132 Resume 133 If dev->hard_header==NULL we are unlikely to restore sensible ll header. 134 135 136 On transmit: 137 ------------ 138 139 dev->hard_header != NULL 140 mac_header -> ll header 141 data -> ll header 142 143 dev->hard_header == NULL (ll header is added by device, we cannot control it) 144 mac_header -> data 145 data -> data 146 147 We should set nh.raw on output to correct posistion, 148 packet classifier depends on it. 149 */ 150 151 /* Private packet socket structures. */ 152 153 /* identical to struct packet_mreq except it has 154 * a longer address field. 155 */ 156 struct packet_mreq_max { 157 int mr_ifindex; 158 unsigned short mr_type; 159 unsigned short mr_alen; 160 unsigned char mr_address[MAX_ADDR_LEN]; 161 }; 162 163 union tpacket_uhdr { 164 struct tpacket_hdr *h1; 165 struct tpacket2_hdr *h2; 166 struct tpacket3_hdr *h3; 167 void *raw; 168 }; 169 170 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 171 int closing, int tx_ring); 172 173 #define V3_ALIGNMENT (8) 174 175 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) 176 177 #define BLK_PLUS_PRIV(sz_of_priv) \ 178 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) 179 180 #define PGV_FROM_VMALLOC 1 181 182 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) 183 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) 184 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) 185 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) 186 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) 187 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) 188 #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) 189 190 struct packet_sock; 191 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 192 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 193 struct packet_type *pt, struct net_device *orig_dev); 194 195 static void *packet_previous_frame(struct packet_sock *po, 196 struct packet_ring_buffer *rb, 197 int status); 198 static void packet_increment_head(struct packet_ring_buffer *buff); 199 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *, 200 struct tpacket_block_desc *); 201 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, 202 struct packet_sock *); 203 static void prb_retire_current_block(struct tpacket_kbdq_core *, 204 struct packet_sock *, unsigned int status); 205 static int prb_queue_frozen(struct tpacket_kbdq_core *); 206 static void prb_open_block(struct tpacket_kbdq_core *, 207 struct tpacket_block_desc *); 208 static void prb_retire_rx_blk_timer_expired(unsigned long); 209 static void 
_prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); 210 static void prb_init_blk_timer(struct packet_sock *, 211 struct tpacket_kbdq_core *, 212 void (*func) (unsigned long)); 213 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); 214 static void prb_clear_rxhash(struct tpacket_kbdq_core *, 215 struct tpacket3_hdr *); 216 static void prb_fill_vlan_info(struct tpacket_kbdq_core *, 217 struct tpacket3_hdr *); 218 static void packet_flush_mclist(struct sock *sk); 219 220 struct packet_skb_cb { 221 union { 222 struct sockaddr_pkt pkt; 223 union { 224 /* Trick: alias skb original length with 225 * ll.sll_family and ll.protocol in order 226 * to save room. 227 */ 228 unsigned int origlen; 229 struct sockaddr_ll ll; 230 }; 231 } sa; 232 }; 233 234 #define vio_le() virtio_legacy_is_little_endian() 235 236 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 237 238 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 239 #define GET_PBLOCK_DESC(x, bid) \ 240 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 241 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 242 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 243 #define GET_NEXT_PRB_BLK_NUM(x) \ 244 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 245 ((x)->kactive_blk_num+1) : 0) 246 247 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 248 static void __fanout_link(struct sock *sk, struct packet_sock *po); 249 250 static int packet_direct_xmit(struct sk_buff *skb) 251 { 252 struct net_device *dev = skb->dev; 253 struct sk_buff *orig_skb = skb; 254 struct netdev_queue *txq; 255 int ret = NETDEV_TX_BUSY; 256 257 if (unlikely(!netif_running(dev) || 258 !netif_carrier_ok(dev))) 259 goto drop; 260 261 skb = validate_xmit_skb_list(skb, dev); 262 if (skb != orig_skb) 263 goto drop; 264 265 txq = skb_get_tx_queue(dev, skb); 266 267 local_bh_disable(); 268 269 HARD_TX_LOCK(dev, txq, smp_processor_id()); 270 if (!netif_xmit_frozen_or_drv_stopped(txq)) 271 ret = netdev_start_xmit(skb, dev, txq, false); 272 HARD_TX_UNLOCK(dev, txq); 273 274 local_bh_enable(); 275 276 if (!dev_xmit_complete(ret)) 277 kfree_skb(skb); 278 279 return ret; 280 drop: 281 atomic_long_inc(&dev->tx_dropped); 282 kfree_skb_list(skb); 283 return NET_XMIT_DROP; 284 } 285 286 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 287 { 288 struct net_device *dev; 289 290 rcu_read_lock(); 291 dev = rcu_dereference(po->cached_dev); 292 if (likely(dev)) 293 dev_hold(dev); 294 rcu_read_unlock(); 295 296 return dev; 297 } 298 299 static void packet_cached_dev_assign(struct packet_sock *po, 300 struct net_device *dev) 301 { 302 rcu_assign_pointer(po->cached_dev, dev); 303 } 304 305 static void packet_cached_dev_reset(struct packet_sock *po) 306 { 307 RCU_INIT_POINTER(po->cached_dev, NULL); 308 } 309 310 static bool packet_use_direct_xmit(const struct packet_sock *po) 311 { 312 return po->xmit == packet_direct_xmit; 313 } 314 315 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 316 { 317 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 318 } 319 320 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 321 { 322 const struct net_device_ops *ops = dev->netdev_ops; 323 u16 queue_index; 324 325 if (ops->ndo_select_queue) { 326 queue_index = ops->ndo_select_queue(dev, skb, NULL, 327 __packet_pick_tx_queue); 328 queue_index = netdev_cap_txqueue(dev, queue_index); 329 } else { 330 
queue_index = __packet_pick_tx_queue(dev, skb); 331 } 332 333 skb_set_queue_mapping(skb, queue_index); 334 } 335 336 /* register_prot_hook must be invoked with the po->bind_lock held, 337 * or from a context in which asynchronous accesses to the packet 338 * socket is not possible (packet_create()). 339 */ 340 static void register_prot_hook(struct sock *sk) 341 { 342 struct packet_sock *po = pkt_sk(sk); 343 344 if (!po->running) { 345 if (po->fanout) 346 __fanout_link(sk, po); 347 else 348 dev_add_pack(&po->prot_hook); 349 350 sock_hold(sk); 351 po->running = 1; 352 } 353 } 354 355 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock 356 * held. If the sync parameter is true, we will temporarily drop 357 * the po->bind_lock and do a synchronize_net to make sure no 358 * asynchronous packet processing paths still refer to the elements 359 * of po->prot_hook. If the sync parameter is false, it is the 360 * callers responsibility to take care of this. 361 */ 362 static void __unregister_prot_hook(struct sock *sk, bool sync) 363 { 364 struct packet_sock *po = pkt_sk(sk); 365 366 po->running = 0; 367 368 if (po->fanout) 369 __fanout_unlink(sk, po); 370 else 371 __dev_remove_pack(&po->prot_hook); 372 373 __sock_put(sk); 374 375 if (sync) { 376 spin_unlock(&po->bind_lock); 377 synchronize_net(); 378 spin_lock(&po->bind_lock); 379 } 380 } 381 382 static void unregister_prot_hook(struct sock *sk, bool sync) 383 { 384 struct packet_sock *po = pkt_sk(sk); 385 386 if (po->running) 387 __unregister_prot_hook(sk, sync); 388 } 389 390 static inline struct page * __pure pgv_to_page(void *addr) 391 { 392 if (is_vmalloc_addr(addr)) 393 return vmalloc_to_page(addr); 394 return virt_to_page(addr); 395 } 396 397 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 398 { 399 union tpacket_uhdr h; 400 401 h.raw = frame; 402 switch (po->tp_version) { 403 case TPACKET_V1: 404 h.h1->tp_status = status; 405 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 406 break; 407 case TPACKET_V2: 408 h.h2->tp_status = status; 409 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 410 break; 411 case TPACKET_V3: 412 default: 413 WARN(1, "TPACKET version not supported.\n"); 414 BUG(); 415 } 416 417 smp_wmb(); 418 } 419 420 static int __packet_get_status(struct packet_sock *po, void *frame) 421 { 422 union tpacket_uhdr h; 423 424 smp_rmb(); 425 426 h.raw = frame; 427 switch (po->tp_version) { 428 case TPACKET_V1: 429 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 430 return h.h1->tp_status; 431 case TPACKET_V2: 432 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 433 return h.h2->tp_status; 434 case TPACKET_V3: 435 default: 436 WARN(1, "TPACKET version not supported.\n"); 437 BUG(); 438 return 0; 439 } 440 } 441 442 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, 443 unsigned int flags) 444 { 445 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 446 447 if (shhwtstamps && 448 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 449 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) 450 return TP_STATUS_TS_RAW_HARDWARE; 451 452 if (ktime_to_timespec_cond(skb->tstamp, ts)) 453 return TP_STATUS_TS_SOFTWARE; 454 455 return 0; 456 } 457 458 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 459 struct sk_buff *skb) 460 { 461 union tpacket_uhdr h; 462 struct timespec ts; 463 __u32 ts_status; 464 465 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 466 return 0; 467 468 h.raw = frame; 469 switch (po->tp_version) { 
470 case TPACKET_V1: 471 h.h1->tp_sec = ts.tv_sec; 472 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 473 break; 474 case TPACKET_V2: 475 h.h2->tp_sec = ts.tv_sec; 476 h.h2->tp_nsec = ts.tv_nsec; 477 break; 478 case TPACKET_V3: 479 default: 480 WARN(1, "TPACKET version not supported.\n"); 481 BUG(); 482 } 483 484 /* one flush is safe, as both fields always lie on the same cacheline */ 485 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 486 smp_wmb(); 487 488 return ts_status; 489 } 490 491 static void *packet_lookup_frame(struct packet_sock *po, 492 struct packet_ring_buffer *rb, 493 unsigned int position, 494 int status) 495 { 496 unsigned int pg_vec_pos, frame_offset; 497 union tpacket_uhdr h; 498 499 pg_vec_pos = position / rb->frames_per_block; 500 frame_offset = position % rb->frames_per_block; 501 502 h.raw = rb->pg_vec[pg_vec_pos].buffer + 503 (frame_offset * rb->frame_size); 504 505 if (status != __packet_get_status(po, h.raw)) 506 return NULL; 507 508 return h.raw; 509 } 510 511 static void *packet_current_frame(struct packet_sock *po, 512 struct packet_ring_buffer *rb, 513 int status) 514 { 515 return packet_lookup_frame(po, rb, rb->head, status); 516 } 517 518 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 519 { 520 del_timer_sync(&pkc->retire_blk_timer); 521 } 522 523 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 524 struct sk_buff_head *rb_queue) 525 { 526 struct tpacket_kbdq_core *pkc; 527 528 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 529 530 spin_lock_bh(&rb_queue->lock); 531 pkc->delete_blk_timer = 1; 532 spin_unlock_bh(&rb_queue->lock); 533 534 prb_del_retire_blk_timer(pkc); 535 } 536 537 static void prb_init_blk_timer(struct packet_sock *po, 538 struct tpacket_kbdq_core *pkc, 539 void (*func) (unsigned long)) 540 { 541 init_timer(&pkc->retire_blk_timer); 542 pkc->retire_blk_timer.data = (long)po; 543 pkc->retire_blk_timer.function = func; 544 pkc->retire_blk_timer.expires = jiffies; 545 } 546 547 static void prb_setup_retire_blk_timer(struct packet_sock *po) 548 { 549 struct tpacket_kbdq_core *pkc; 550 551 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 552 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); 553 } 554 555 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 556 int blk_size_in_bytes) 557 { 558 struct net_device *dev; 559 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; 560 struct ethtool_link_ksettings ecmd; 561 int err; 562 563 rtnl_lock(); 564 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 565 if (unlikely(!dev)) { 566 rtnl_unlock(); 567 return DEFAULT_PRB_RETIRE_TOV; 568 } 569 err = __ethtool_get_link_ksettings(dev, &ecmd); 570 rtnl_unlock(); 571 if (!err) { 572 /* 573 * If the link speed is so slow you don't really 574 * need to worry about perf anyways 575 */ 576 if (ecmd.base.speed < SPEED_1000 || 577 ecmd.base.speed == SPEED_UNKNOWN) { 578 return DEFAULT_PRB_RETIRE_TOV; 579 } else { 580 msec = 1; 581 div = ecmd.base.speed / 1000; 582 } 583 } 584 585 mbits = (blk_size_in_bytes * 8) / (1024 * 1024); 586 587 if (div) 588 mbits /= div; 589 590 tmo = mbits * msec; 591 592 if (div) 593 return tmo+1; 594 return tmo; 595 } 596 597 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 598 union tpacket_req_u *req_u) 599 { 600 p1->feature_req_word = req_u->req3.tp_feature_req_word; 601 } 602 603 static void init_prb_bdqc(struct packet_sock *po, 604 struct packet_ring_buffer *rb, 605 struct pgv *pg_vec, 606 union tpacket_req_u *req_u) 607 { 608 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 609 
struct tpacket_block_desc *pbd; 610 611 memset(p1, 0x0, sizeof(*p1)); 612 613 p1->knxt_seq_num = 1; 614 p1->pkbdq = pg_vec; 615 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 616 p1->pkblk_start = pg_vec[0].buffer; 617 p1->kblk_size = req_u->req3.tp_block_size; 618 p1->knum_blocks = req_u->req3.tp_block_nr; 619 p1->hdrlen = po->tp_hdrlen; 620 p1->version = po->tp_version; 621 p1->last_kactive_blk_num = 0; 622 po->stats.stats3.tp_freeze_q_cnt = 0; 623 if (req_u->req3.tp_retire_blk_tov) 624 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; 625 else 626 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, 627 req_u->req3.tp_block_size); 628 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); 629 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 630 631 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 632 prb_init_ft_ops(p1, req_u); 633 prb_setup_retire_blk_timer(po); 634 prb_open_block(p1, pbd); 635 } 636 637 /* Do NOT update the last_blk_num first. 638 * Assumes sk_buff_head lock is held. 639 */ 640 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) 641 { 642 mod_timer(&pkc->retire_blk_timer, 643 jiffies + pkc->tov_in_jiffies); 644 pkc->last_kactive_blk_num = pkc->kactive_blk_num; 645 } 646 647 /* 648 * Timer logic: 649 * 1) We refresh the timer only when we open a block. 650 * By doing this we don't waste cycles refreshing the timer 651 * on packet-by-packet basis. 652 * 653 * With a 1MB block-size, on a 1Gbps line, it will take 654 * i) ~8 ms to fill a block + ii) memcpy etc. 655 * In this cut we are not accounting for the memcpy time. 656 * 657 * So, if the user sets the 'tmo' to 10ms then the timer 658 * will never fire while the block is still getting filled 659 * (which is what we want). However, the user could choose 660 * to close a block early and that's fine. 661 * 662 * But when the timer does fire, we check whether or not to refresh it. 663 * Since the tmo granularity is in msecs, it is not too expensive 664 * to refresh the timer, lets say every '8' msecs. 665 * Either the user can set the 'tmo' or we can derive it based on 666 * a) line-speed and b) block-size. 667 * prb_calc_retire_blk_tmo() calculates the tmo. 668 * 669 */ 670 static void prb_retire_rx_blk_timer_expired(unsigned long data) 671 { 672 struct packet_sock *po = (struct packet_sock *)data; 673 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 674 unsigned int frozen; 675 struct tpacket_block_desc *pbd; 676 677 spin_lock(&po->sk.sk_receive_queue.lock); 678 679 frozen = prb_queue_frozen(pkc); 680 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 681 682 if (unlikely(pkc->delete_blk_timer)) 683 goto out; 684 685 /* We only need to plug the race when the block is partially filled. 686 * tpacket_rcv: 687 * lock(); increment BLOCK_NUM_PKTS; unlock() 688 * copy_bits() is in progress ... 689 * timer fires on other cpu: 690 * we can't retire the current block because copy_bits 691 * is in progress. 692 * 693 */ 694 if (BLOCK_NUM_PKTS(pbd)) { 695 while (atomic_read(&pkc->blk_fill_in_prog)) { 696 /* Waiting for skb_copy_bits to finish... */ 697 cpu_relax(); 698 } 699 } 700 701 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 702 if (!frozen) { 703 if (!BLOCK_NUM_PKTS(pbd)) { 704 /* An empty block. Just refresh the timer. */ 705 goto refresh_timer; 706 } 707 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 708 if (!prb_dispatch_next_block(pkc, po)) 709 goto refresh_timer; 710 else 711 goto out; 712 } else { 713 /* Case 1. 
Queue was frozen because user-space was 714 * lagging behind. 715 */ 716 if (prb_curr_blk_in_use(pkc, pbd)) { 717 /* 718 * Ok, user-space is still behind. 719 * So just refresh the timer. 720 */ 721 goto refresh_timer; 722 } else { 723 /* Case 2. queue was frozen,user-space caught up, 724 * now the link went idle && the timer fired. 725 * We don't have a block to close.So we open this 726 * block and restart the timer. 727 * opening a block thaws the queue,restarts timer 728 * Thawing/timer-refresh is a side effect. 729 */ 730 prb_open_block(pkc, pbd); 731 goto out; 732 } 733 } 734 } 735 736 refresh_timer: 737 _prb_refresh_rx_retire_blk_timer(pkc); 738 739 out: 740 spin_unlock(&po->sk.sk_receive_queue.lock); 741 } 742 743 static void prb_flush_block(struct tpacket_kbdq_core *pkc1, 744 struct tpacket_block_desc *pbd1, __u32 status) 745 { 746 /* Flush everything minus the block header */ 747 748 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 749 u8 *start, *end; 750 751 start = (u8 *)pbd1; 752 753 /* Skip the block header(we know header WILL fit in 4K) */ 754 start += PAGE_SIZE; 755 756 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); 757 for (; start < end; start += PAGE_SIZE) 758 flush_dcache_page(pgv_to_page(start)); 759 760 smp_wmb(); 761 #endif 762 763 /* Now update the block status. */ 764 765 BLOCK_STATUS(pbd1) = status; 766 767 /* Flush the block header */ 768 769 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 770 start = (u8 *)pbd1; 771 flush_dcache_page(pgv_to_page(start)); 772 773 smp_wmb(); 774 #endif 775 } 776 777 /* 778 * Side effect: 779 * 780 * 1) flush the block 781 * 2) Increment active_blk_num 782 * 783 * Note:We DONT refresh the timer on purpose. 784 * Because almost always the next block will be opened. 785 */ 786 static void prb_close_block(struct tpacket_kbdq_core *pkc1, 787 struct tpacket_block_desc *pbd1, 788 struct packet_sock *po, unsigned int stat) 789 { 790 __u32 status = TP_STATUS_USER | stat; 791 792 struct tpacket3_hdr *last_pkt; 793 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 794 struct sock *sk = &po->sk; 795 796 if (po->stats.stats3.tp_drops) 797 status |= TP_STATUS_LOSING; 798 799 last_pkt = (struct tpacket3_hdr *)pkc1->prev; 800 last_pkt->tp_next_offset = 0; 801 802 /* Get the ts of the last pkt */ 803 if (BLOCK_NUM_PKTS(pbd1)) { 804 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 805 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 806 } else { 807 /* Ok, we tmo'd - so get the current time. 808 * 809 * It shouldn't really happen as we don't close empty 810 * blocks. See prb_retire_rx_blk_timer_expired(). 811 */ 812 struct timespec ts; 813 getnstimeofday(&ts); 814 h1->ts_last_pkt.ts_sec = ts.tv_sec; 815 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; 816 } 817 818 smp_wmb(); 819 820 /* Flush the block */ 821 prb_flush_block(pkc1, pbd1, status); 822 823 sk->sk_data_ready(sk); 824 825 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 826 } 827 828 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) 829 { 830 pkc->reset_pending_on_curr_blk = 0; 831 } 832 833 /* 834 * Side effect of opening a block: 835 * 836 * 1) prb_queue is thawed. 837 * 2) retire_blk_timer is refreshed. 
838 * 839 */ 840 static void prb_open_block(struct tpacket_kbdq_core *pkc1, 841 struct tpacket_block_desc *pbd1) 842 { 843 struct timespec ts; 844 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 845 846 smp_rmb(); 847 848 /* We could have just memset this but we will lose the 849 * flexibility of making the priv area sticky 850 */ 851 852 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; 853 BLOCK_NUM_PKTS(pbd1) = 0; 854 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 855 856 getnstimeofday(&ts); 857 858 h1->ts_first_pkt.ts_sec = ts.tv_sec; 859 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 860 861 pkc1->pkblk_start = (char *)pbd1; 862 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 863 864 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 865 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 866 867 pbd1->version = pkc1->version; 868 pkc1->prev = pkc1->nxt_offset; 869 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; 870 871 prb_thaw_queue(pkc1); 872 _prb_refresh_rx_retire_blk_timer(pkc1); 873 874 smp_wmb(); 875 } 876 877 /* 878 * Queue freeze logic: 879 * 1) Assume tp_block_nr = 8 blocks. 880 * 2) At time 't0', user opens Rx ring. 881 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 882 * 4) user-space is either sleeping or processing block '0'. 883 * 5) tpacket_rcv is currently filling block '7', since there is no space left, 884 * it will close block-7,loop around and try to fill block '0'. 885 * call-flow: 886 * __packet_lookup_frame_in_block 887 * prb_retire_current_block() 888 * prb_dispatch_next_block() 889 * |->(BLOCK_STATUS == USER) evaluates to true 890 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 891 * 6) Now there are two cases: 892 * 6.1) Link goes idle right after the queue is frozen. 893 * But remember, the last open_block() refreshed the timer. 894 * When this timer expires,it will refresh itself so that we can 895 * re-open block-0 in near future. 896 * 6.2) Link is busy and keeps on receiving packets. This is a simple 897 * case and __packet_lookup_frame_in_block will check if block-0 898 * is free and can now be re-used. 899 */ 900 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 901 struct packet_sock *po) 902 { 903 pkc->reset_pending_on_curr_blk = 1; 904 po->stats.stats3.tp_freeze_q_cnt++; 905 } 906 907 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 908 909 /* 910 * If the next block is free then we will dispatch it 911 * and return a good offset. 912 * Else, we will freeze the queue. 913 * So, caller must check the return value. 914 */ 915 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 916 struct packet_sock *po) 917 { 918 struct tpacket_block_desc *pbd; 919 920 smp_rmb(); 921 922 /* 1. Get current block num */ 923 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 924 925 /* 2. If this block is currently in_use then freeze the queue */ 926 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 927 prb_freeze_queue(pkc, po); 928 return NULL; 929 } 930 931 /* 932 * 3. 933 * open this block and return the offset where the first packet 934 * needs to get stored. 
935 */ 936 prb_open_block(pkc, pbd); 937 return (void *)pkc->nxt_offset; 938 } 939 940 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 941 struct packet_sock *po, unsigned int status) 942 { 943 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 944 945 /* retire/close the current block */ 946 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 947 /* 948 * Plug the case where copy_bits() is in progress on 949 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 950 * have space to copy the pkt in the current block and 951 * called prb_retire_current_block() 952 * 953 * We don't need to worry about the TMO case because 954 * the timer-handler already handled this case. 955 */ 956 if (!(status & TP_STATUS_BLK_TMO)) { 957 while (atomic_read(&pkc->blk_fill_in_prog)) { 958 /* Waiting for skb_copy_bits to finish... */ 959 cpu_relax(); 960 } 961 } 962 prb_close_block(pkc, pbd, po, status); 963 return; 964 } 965 } 966 967 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc, 968 struct tpacket_block_desc *pbd) 969 { 970 return TP_STATUS_USER & BLOCK_STATUS(pbd); 971 } 972 973 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 974 { 975 return pkc->reset_pending_on_curr_blk; 976 } 977 978 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 979 { 980 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 981 atomic_dec(&pkc->blk_fill_in_prog); 982 } 983 984 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 985 struct tpacket3_hdr *ppd) 986 { 987 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 988 } 989 990 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 991 struct tpacket3_hdr *ppd) 992 { 993 ppd->hv1.tp_rxhash = 0; 994 } 995 996 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 997 struct tpacket3_hdr *ppd) 998 { 999 if (skb_vlan_tag_present(pkc->skb)) { 1000 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 1001 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 1002 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1003 } else { 1004 ppd->hv1.tp_vlan_tci = 0; 1005 ppd->hv1.tp_vlan_tpid = 0; 1006 ppd->tp_status = TP_STATUS_AVAILABLE; 1007 } 1008 } 1009 1010 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 1011 struct tpacket3_hdr *ppd) 1012 { 1013 ppd->hv1.tp_padding = 0; 1014 prb_fill_vlan_info(pkc, ppd); 1015 1016 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 1017 prb_fill_rxhash(pkc, ppd); 1018 else 1019 prb_clear_rxhash(pkc, ppd); 1020 } 1021 1022 static void prb_fill_curr_block(char *curr, 1023 struct tpacket_kbdq_core *pkc, 1024 struct tpacket_block_desc *pbd, 1025 unsigned int len) 1026 { 1027 struct tpacket3_hdr *ppd; 1028 1029 ppd = (struct tpacket3_hdr *)curr; 1030 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 1031 pkc->prev = curr; 1032 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1033 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1034 BLOCK_NUM_PKTS(pbd) += 1; 1035 atomic_inc(&pkc->blk_fill_in_prog); 1036 prb_run_all_ft_ops(pkc, ppd); 1037 } 1038 1039 /* Assumes caller has the sk->rx_queue.lock */ 1040 static void *__packet_lookup_frame_in_block(struct packet_sock *po, 1041 struct sk_buff *skb, 1042 int status, 1043 unsigned int len 1044 ) 1045 { 1046 struct tpacket_kbdq_core *pkc; 1047 struct tpacket_block_desc *pbd; 1048 char *curr, *end; 1049 1050 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1051 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1052 1053 /* Queue is frozen when user space is lagging behind */ 1054 if 
(prb_queue_frozen(pkc)) { 1055 /* 1056 * Check if that last block which caused the queue to freeze, 1057 * is still in_use by user-space. 1058 */ 1059 if (prb_curr_blk_in_use(pkc, pbd)) { 1060 /* Can't record this packet */ 1061 return NULL; 1062 } else { 1063 /* 1064 * Ok, the block was released by user-space. 1065 * Now let's open that block. 1066 * opening a block also thaws the queue. 1067 * Thawing is a side effect. 1068 */ 1069 prb_open_block(pkc, pbd); 1070 } 1071 } 1072 1073 smp_mb(); 1074 curr = pkc->nxt_offset; 1075 pkc->skb = skb; 1076 end = (char *)pbd + pkc->kblk_size; 1077 1078 /* first try the current block */ 1079 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1080 prb_fill_curr_block(curr, pkc, pbd, len); 1081 return (void *)curr; 1082 } 1083 1084 /* Ok, close the current block */ 1085 prb_retire_current_block(pkc, po, 0); 1086 1087 /* Now, try to dispatch the next block */ 1088 curr = (char *)prb_dispatch_next_block(pkc, po); 1089 if (curr) { 1090 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1091 prb_fill_curr_block(curr, pkc, pbd, len); 1092 return (void *)curr; 1093 } 1094 1095 /* 1096 * No free blocks are available.user_space hasn't caught up yet. 1097 * Queue was just frozen and now this packet will get dropped. 1098 */ 1099 return NULL; 1100 } 1101 1102 static void *packet_current_rx_frame(struct packet_sock *po, 1103 struct sk_buff *skb, 1104 int status, unsigned int len) 1105 { 1106 char *curr = NULL; 1107 switch (po->tp_version) { 1108 case TPACKET_V1: 1109 case TPACKET_V2: 1110 curr = packet_lookup_frame(po, &po->rx_ring, 1111 po->rx_ring.head, status); 1112 return curr; 1113 case TPACKET_V3: 1114 return __packet_lookup_frame_in_block(po, skb, status, len); 1115 default: 1116 WARN(1, "TPACKET version not supported\n"); 1117 BUG(); 1118 return NULL; 1119 } 1120 } 1121 1122 static void *prb_lookup_block(struct packet_sock *po, 1123 struct packet_ring_buffer *rb, 1124 unsigned int idx, 1125 int status) 1126 { 1127 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1128 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1129 1130 if (status != BLOCK_STATUS(pbd)) 1131 return NULL; 1132 return pbd; 1133 } 1134 1135 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1136 { 1137 unsigned int prev; 1138 if (rb->prb_bdqc.kactive_blk_num) 1139 prev = rb->prb_bdqc.kactive_blk_num-1; 1140 else 1141 prev = rb->prb_bdqc.knum_blocks-1; 1142 return prev; 1143 } 1144 1145 /* Assumes caller has held the rx_queue.lock */ 1146 static void *__prb_previous_block(struct packet_sock *po, 1147 struct packet_ring_buffer *rb, 1148 int status) 1149 { 1150 unsigned int previous = prb_previous_blk_num(rb); 1151 return prb_lookup_block(po, rb, previous, status); 1152 } 1153 1154 static void *packet_previous_rx_frame(struct packet_sock *po, 1155 struct packet_ring_buffer *rb, 1156 int status) 1157 { 1158 if (po->tp_version <= TPACKET_V2) 1159 return packet_previous_frame(po, rb, status); 1160 1161 return __prb_previous_block(po, rb, status); 1162 } 1163 1164 static void packet_increment_rx_head(struct packet_sock *po, 1165 struct packet_ring_buffer *rb) 1166 { 1167 switch (po->tp_version) { 1168 case TPACKET_V1: 1169 case TPACKET_V2: 1170 return packet_increment_head(rb); 1171 case TPACKET_V3: 1172 default: 1173 WARN(1, "TPACKET version not supported.\n"); 1174 BUG(); 1175 return; 1176 } 1177 } 1178 1179 static void *packet_previous_frame(struct packet_sock *po, 1180 struct packet_ring_buffer *rb, 1181 int status) 1182 { 1183 unsigned int previous = rb->head ? 
rb->head - 1 : rb->frame_max; 1184 return packet_lookup_frame(po, rb, previous, status); 1185 } 1186 1187 static void packet_increment_head(struct packet_ring_buffer *buff) 1188 { 1189 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1190 } 1191 1192 static void packet_inc_pending(struct packet_ring_buffer *rb) 1193 { 1194 this_cpu_inc(*rb->pending_refcnt); 1195 } 1196 1197 static void packet_dec_pending(struct packet_ring_buffer *rb) 1198 { 1199 this_cpu_dec(*rb->pending_refcnt); 1200 } 1201 1202 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1203 { 1204 unsigned int refcnt = 0; 1205 int cpu; 1206 1207 /* We don't use pending refcount in rx_ring. */ 1208 if (rb->pending_refcnt == NULL) 1209 return 0; 1210 1211 for_each_possible_cpu(cpu) 1212 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1213 1214 return refcnt; 1215 } 1216 1217 static int packet_alloc_pending(struct packet_sock *po) 1218 { 1219 po->rx_ring.pending_refcnt = NULL; 1220 1221 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1222 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1223 return -ENOBUFS; 1224 1225 return 0; 1226 } 1227 1228 static void packet_free_pending(struct packet_sock *po) 1229 { 1230 free_percpu(po->tx_ring.pending_refcnt); 1231 } 1232 1233 #define ROOM_POW_OFF 2 1234 #define ROOM_NONE 0x0 1235 #define ROOM_LOW 0x1 1236 #define ROOM_NORMAL 0x2 1237 1238 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) 1239 { 1240 int idx, len; 1241 1242 len = po->rx_ring.frame_max + 1; 1243 idx = po->rx_ring.head; 1244 if (pow_off) 1245 idx += len >> pow_off; 1246 if (idx >= len) 1247 idx -= len; 1248 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1249 } 1250 1251 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) 1252 { 1253 int idx, len; 1254 1255 len = po->rx_ring.prb_bdqc.knum_blocks; 1256 idx = po->rx_ring.prb_bdqc.kactive_blk_num; 1257 if (pow_off) 1258 idx += len >> pow_off; 1259 if (idx >= len) 1260 idx -= len; 1261 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1262 } 1263 1264 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1265 { 1266 struct sock *sk = &po->sk; 1267 int ret = ROOM_NONE; 1268 1269 if (po->prot_hook.func != tpacket_rcv) { 1270 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) 1271 - (skb ? 
skb->truesize : 0); 1272 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) 1273 return ROOM_NORMAL; 1274 else if (avail > 0) 1275 return ROOM_LOW; 1276 else 1277 return ROOM_NONE; 1278 } 1279 1280 if (po->tp_version == TPACKET_V3) { 1281 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) 1282 ret = ROOM_NORMAL; 1283 else if (__tpacket_v3_has_room(po, 0)) 1284 ret = ROOM_LOW; 1285 } else { 1286 if (__tpacket_has_room(po, ROOM_POW_OFF)) 1287 ret = ROOM_NORMAL; 1288 else if (__tpacket_has_room(po, 0)) 1289 ret = ROOM_LOW; 1290 } 1291 1292 return ret; 1293 } 1294 1295 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1296 { 1297 int ret; 1298 bool has_room; 1299 1300 spin_lock_bh(&po->sk.sk_receive_queue.lock); 1301 ret = __packet_rcv_has_room(po, skb); 1302 has_room = ret == ROOM_NORMAL; 1303 if (po->pressure == has_room) 1304 po->pressure = !has_room; 1305 spin_unlock_bh(&po->sk.sk_receive_queue.lock); 1306 1307 return ret; 1308 } 1309 1310 static void packet_sock_destruct(struct sock *sk) 1311 { 1312 skb_queue_purge(&sk->sk_error_queue); 1313 1314 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1315 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 1316 1317 if (!sock_flag(sk, SOCK_DEAD)) { 1318 pr_err("Attempt to release alive packet socket: %p\n", sk); 1319 return; 1320 } 1321 1322 sk_refcnt_debug_dec(sk); 1323 } 1324 1325 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) 1326 { 1327 u32 rxhash; 1328 int i, count = 0; 1329 1330 rxhash = skb_get_hash(skb); 1331 for (i = 0; i < ROLLOVER_HLEN; i++) 1332 if (po->rollover->history[i] == rxhash) 1333 count++; 1334 1335 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; 1336 return count > (ROLLOVER_HLEN >> 1); 1337 } 1338 1339 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1340 struct sk_buff *skb, 1341 unsigned int num) 1342 { 1343 return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1344 } 1345 1346 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1347 struct sk_buff *skb, 1348 unsigned int num) 1349 { 1350 unsigned int val = atomic_inc_return(&f->rr_cur); 1351 1352 return val % num; 1353 } 1354 1355 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1356 struct sk_buff *skb, 1357 unsigned int num) 1358 { 1359 return smp_processor_id() % num; 1360 } 1361 1362 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1363 struct sk_buff *skb, 1364 unsigned int num) 1365 { 1366 return prandom_u32_max(num); 1367 } 1368 1369 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1370 struct sk_buff *skb, 1371 unsigned int idx, bool try_self, 1372 unsigned int num) 1373 { 1374 struct packet_sock *po, *po_next, *po_skip = NULL; 1375 unsigned int i, j, room = ROOM_NONE; 1376 1377 po = pkt_sk(f->arr[idx]); 1378 1379 if (try_self) { 1380 room = packet_rcv_has_room(po, skb); 1381 if (room == ROOM_NORMAL || 1382 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) 1383 return idx; 1384 po_skip = po; 1385 } 1386 1387 i = j = min_t(int, po->rollover->sock, num - 1); 1388 do { 1389 po_next = pkt_sk(f->arr[i]); 1390 if (po_next != po_skip && !po_next->pressure && 1391 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { 1392 if (i != j) 1393 po->rollover->sock = i; 1394 atomic_long_inc(&po->rollover->num); 1395 if (room == ROOM_LOW) 1396 atomic_long_inc(&po->rollover->num_huge); 1397 return i; 1398 } 1399 1400 if (++i == num) 1401 i = 0; 1402 } while (i != j); 1403 1404 atomic_long_inc(&po->rollover->num_failed); 1405 return idx; 1406 } 1407 1408 static 
unsigned int fanout_demux_qm(struct packet_fanout *f, 1409 struct sk_buff *skb, 1410 unsigned int num) 1411 { 1412 return skb_get_queue_mapping(skb) % num; 1413 } 1414 1415 static unsigned int fanout_demux_bpf(struct packet_fanout *f, 1416 struct sk_buff *skb, 1417 unsigned int num) 1418 { 1419 struct bpf_prog *prog; 1420 unsigned int ret = 0; 1421 1422 rcu_read_lock(); 1423 prog = rcu_dereference(f->bpf_prog); 1424 if (prog) 1425 ret = bpf_prog_run_clear_cb(prog, skb) % num; 1426 rcu_read_unlock(); 1427 1428 return ret; 1429 } 1430 1431 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1432 { 1433 return f->flags & (flag >> 8); 1434 } 1435 1436 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1437 struct packet_type *pt, struct net_device *orig_dev) 1438 { 1439 struct packet_fanout *f = pt->af_packet_priv; 1440 unsigned int num = READ_ONCE(f->num_members); 1441 struct net *net = read_pnet(&f->net); 1442 struct packet_sock *po; 1443 unsigned int idx; 1444 1445 if (!net_eq(dev_net(dev), net) || !num) { 1446 kfree_skb(skb); 1447 return 0; 1448 } 1449 1450 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1451 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); 1452 if (!skb) 1453 return 0; 1454 } 1455 switch (f->type) { 1456 case PACKET_FANOUT_HASH: 1457 default: 1458 idx = fanout_demux_hash(f, skb, num); 1459 break; 1460 case PACKET_FANOUT_LB: 1461 idx = fanout_demux_lb(f, skb, num); 1462 break; 1463 case PACKET_FANOUT_CPU: 1464 idx = fanout_demux_cpu(f, skb, num); 1465 break; 1466 case PACKET_FANOUT_RND: 1467 idx = fanout_demux_rnd(f, skb, num); 1468 break; 1469 case PACKET_FANOUT_QM: 1470 idx = fanout_demux_qm(f, skb, num); 1471 break; 1472 case PACKET_FANOUT_ROLLOVER: 1473 idx = fanout_demux_rollover(f, skb, 0, false, num); 1474 break; 1475 case PACKET_FANOUT_CBPF: 1476 case PACKET_FANOUT_EBPF: 1477 idx = fanout_demux_bpf(f, skb, num); 1478 break; 1479 } 1480 1481 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) 1482 idx = fanout_demux_rollover(f, skb, idx, true, num); 1483 1484 po = pkt_sk(f->arr[idx]); 1485 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1486 } 1487 1488 DEFINE_MUTEX(fanout_mutex); 1489 EXPORT_SYMBOL_GPL(fanout_mutex); 1490 static LIST_HEAD(fanout_list); 1491 1492 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1493 { 1494 struct packet_fanout *f = po->fanout; 1495 1496 spin_lock(&f->lock); 1497 f->arr[f->num_members] = sk; 1498 smp_wmb(); 1499 f->num_members++; 1500 spin_unlock(&f->lock); 1501 } 1502 1503 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1504 { 1505 struct packet_fanout *f = po->fanout; 1506 int i; 1507 1508 spin_lock(&f->lock); 1509 for (i = 0; i < f->num_members; i++) { 1510 if (f->arr[i] == sk) 1511 break; 1512 } 1513 BUG_ON(i >= f->num_members); 1514 f->arr[i] = f->arr[f->num_members - 1]; 1515 f->num_members--; 1516 spin_unlock(&f->lock); 1517 } 1518 1519 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1520 { 1521 if (sk->sk_family != PF_PACKET) 1522 return false; 1523 1524 return ptype->af_packet_priv == pkt_sk(sk)->fanout; 1525 } 1526 1527 static void fanout_init_data(struct packet_fanout *f) 1528 { 1529 switch (f->type) { 1530 case PACKET_FANOUT_LB: 1531 atomic_set(&f->rr_cur, 0); 1532 break; 1533 case PACKET_FANOUT_CBPF: 1534 case PACKET_FANOUT_EBPF: 1535 RCU_INIT_POINTER(f->bpf_prog, NULL); 1536 break; 1537 } 1538 } 1539 1540 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) 1541 { 1542 
struct bpf_prog *old; 1543 1544 spin_lock(&f->lock); 1545 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); 1546 rcu_assign_pointer(f->bpf_prog, new); 1547 spin_unlock(&f->lock); 1548 1549 if (old) { 1550 synchronize_net(); 1551 bpf_prog_destroy(old); 1552 } 1553 } 1554 1555 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, 1556 unsigned int len) 1557 { 1558 struct bpf_prog *new; 1559 struct sock_fprog fprog; 1560 int ret; 1561 1562 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1563 return -EPERM; 1564 if (len != sizeof(fprog)) 1565 return -EINVAL; 1566 if (copy_from_user(&fprog, data, len)) 1567 return -EFAULT; 1568 1569 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); 1570 if (ret) 1571 return ret; 1572 1573 __fanout_set_data_bpf(po->fanout, new); 1574 return 0; 1575 } 1576 1577 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, 1578 unsigned int len) 1579 { 1580 struct bpf_prog *new; 1581 u32 fd; 1582 1583 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1584 return -EPERM; 1585 if (len != sizeof(fd)) 1586 return -EINVAL; 1587 if (copy_from_user(&fd, data, len)) 1588 return -EFAULT; 1589 1590 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 1591 if (IS_ERR(new)) 1592 return PTR_ERR(new); 1593 1594 __fanout_set_data_bpf(po->fanout, new); 1595 return 0; 1596 } 1597 1598 static int fanout_set_data(struct packet_sock *po, char __user *data, 1599 unsigned int len) 1600 { 1601 switch (po->fanout->type) { 1602 case PACKET_FANOUT_CBPF: 1603 return fanout_set_data_cbpf(po, data, len); 1604 case PACKET_FANOUT_EBPF: 1605 return fanout_set_data_ebpf(po, data, len); 1606 default: 1607 return -EINVAL; 1608 }; 1609 } 1610 1611 static void fanout_release_data(struct packet_fanout *f) 1612 { 1613 switch (f->type) { 1614 case PACKET_FANOUT_CBPF: 1615 case PACKET_FANOUT_EBPF: 1616 __fanout_set_data_bpf(f, NULL); 1617 }; 1618 } 1619 1620 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1621 { 1622 struct packet_sock *po = pkt_sk(sk); 1623 struct packet_fanout *f, *match; 1624 u8 type = type_flags & 0xff; 1625 u8 flags = type_flags >> 8; 1626 int err; 1627 1628 switch (type) { 1629 case PACKET_FANOUT_ROLLOVER: 1630 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1631 return -EINVAL; 1632 case PACKET_FANOUT_HASH: 1633 case PACKET_FANOUT_LB: 1634 case PACKET_FANOUT_CPU: 1635 case PACKET_FANOUT_RND: 1636 case PACKET_FANOUT_QM: 1637 case PACKET_FANOUT_CBPF: 1638 case PACKET_FANOUT_EBPF: 1639 break; 1640 default: 1641 return -EINVAL; 1642 } 1643 1644 if (!po->running) 1645 return -EINVAL; 1646 1647 if (po->fanout) 1648 return -EALREADY; 1649 1650 if (type == PACKET_FANOUT_ROLLOVER || 1651 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1652 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); 1653 if (!po->rollover) 1654 return -ENOMEM; 1655 atomic_long_set(&po->rollover->num, 0); 1656 atomic_long_set(&po->rollover->num_huge, 0); 1657 atomic_long_set(&po->rollover->num_failed, 0); 1658 } 1659 1660 mutex_lock(&fanout_mutex); 1661 match = NULL; 1662 list_for_each_entry(f, &fanout_list, list) { 1663 if (f->id == id && 1664 read_pnet(&f->net) == sock_net(sk)) { 1665 match = f; 1666 break; 1667 } 1668 } 1669 err = -EINVAL; 1670 if (match && match->flags != flags) 1671 goto out; 1672 if (!match) { 1673 err = -ENOMEM; 1674 match = kzalloc(sizeof(*match), GFP_KERNEL); 1675 if (!match) 1676 goto out; 1677 write_pnet(&match->net, sock_net(sk)); 1678 match->id = id; 1679 match->type = type; 1680 match->flags = flags; 1681 
INIT_LIST_HEAD(&match->list); 1682 spin_lock_init(&match->lock); 1683 atomic_set(&match->sk_ref, 0); 1684 fanout_init_data(match); 1685 match->prot_hook.type = po->prot_hook.type; 1686 match->prot_hook.dev = po->prot_hook.dev; 1687 match->prot_hook.func = packet_rcv_fanout; 1688 match->prot_hook.af_packet_priv = match; 1689 match->prot_hook.id_match = match_fanout_group; 1690 dev_add_pack(&match->prot_hook); 1691 list_add(&match->list, &fanout_list); 1692 } 1693 err = -EINVAL; 1694 if (match->type == type && 1695 match->prot_hook.type == po->prot_hook.type && 1696 match->prot_hook.dev == po->prot_hook.dev) { 1697 err = -ENOSPC; 1698 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { 1699 __dev_remove_pack(&po->prot_hook); 1700 po->fanout = match; 1701 atomic_inc(&match->sk_ref); 1702 __fanout_link(sk, po); 1703 err = 0; 1704 } 1705 } 1706 out: 1707 mutex_unlock(&fanout_mutex); 1708 if (err) { 1709 kfree(po->rollover); 1710 po->rollover = NULL; 1711 } 1712 return err; 1713 } 1714 1715 static void fanout_release(struct sock *sk) 1716 { 1717 struct packet_sock *po = pkt_sk(sk); 1718 struct packet_fanout *f; 1719 1720 f = po->fanout; 1721 if (!f) 1722 return; 1723 1724 mutex_lock(&fanout_mutex); 1725 po->fanout = NULL; 1726 1727 if (atomic_dec_and_test(&f->sk_ref)) { 1728 list_del(&f->list); 1729 dev_remove_pack(&f->prot_hook); 1730 fanout_release_data(f); 1731 kfree(f); 1732 } 1733 mutex_unlock(&fanout_mutex); 1734 1735 if (po->rollover) 1736 kfree_rcu(po->rollover, rcu); 1737 } 1738 1739 static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1740 struct sk_buff *skb) 1741 { 1742 /* Earlier code assumed this would be a VLAN pkt, double-check 1743 * this now that we have the actual packet in hand. We can only 1744 * do this check on Ethernet devices. 1745 */ 1746 if (unlikely(dev->type != ARPHRD_ETHER)) 1747 return false; 1748 1749 skb_reset_mac_header(skb); 1750 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); 1751 } 1752 1753 static const struct proto_ops packet_ops; 1754 1755 static const struct proto_ops packet_ops_spkt; 1756 1757 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1758 struct packet_type *pt, struct net_device *orig_dev) 1759 { 1760 struct sock *sk; 1761 struct sockaddr_pkt *spkt; 1762 1763 /* 1764 * When we registered the protocol we saved the socket in the data 1765 * field for just this event. 1766 */ 1767 1768 sk = pt->af_packet_priv; 1769 1770 /* 1771 * Yank back the headers [hope the device set this 1772 * right or kerboom...] 1773 * 1774 * Incoming packets have ll header pulled, 1775 * push it back. 1776 * 1777 * For outgoing ones skb->data == skb_mac_header(skb) 1778 * so that this procedure is noop. 1779 */ 1780 1781 if (skb->pkt_type == PACKET_LOOPBACK) 1782 goto out; 1783 1784 if (!net_eq(dev_net(dev), sock_net(sk))) 1785 goto out; 1786 1787 skb = skb_share_check(skb, GFP_ATOMIC); 1788 if (skb == NULL) 1789 goto oom; 1790 1791 /* drop any routing info */ 1792 skb_dst_drop(skb); 1793 1794 /* drop conntrack reference */ 1795 nf_reset(skb); 1796 1797 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1798 1799 skb_push(skb, skb->data - skb_mac_header(skb)); 1800 1801 /* 1802 * The SOCK_PACKET socket receives _all_ frames. 1803 */ 1804 1805 spkt->spkt_family = dev->type; 1806 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1807 spkt->spkt_protocol = skb->protocol; 1808 1809 /* 1810 * Charge the memory to the socket. This is done specifically 1811 * to prevent sockets using all the memory up. 
1812 */ 1813 1814 if (sock_queue_rcv_skb(sk, skb) == 0) 1815 return 0; 1816 1817 out: 1818 kfree_skb(skb); 1819 oom: 1820 return 0; 1821 } 1822 1823 1824 /* 1825 * Output a raw packet to a device layer. This bypasses all the other 1826 * protocol layers and you must therefore supply it with a complete frame 1827 */ 1828 1829 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, 1830 size_t len) 1831 { 1832 struct sock *sk = sock->sk; 1833 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1834 struct sk_buff *skb = NULL; 1835 struct net_device *dev; 1836 struct sockcm_cookie sockc; 1837 __be16 proto = 0; 1838 int err; 1839 int extra_len = 0; 1840 1841 /* 1842 * Get and verify the address. 1843 */ 1844 1845 if (saddr) { 1846 if (msg->msg_namelen < sizeof(struct sockaddr)) 1847 return -EINVAL; 1848 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1849 proto = saddr->spkt_protocol; 1850 } else 1851 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1852 1853 /* 1854 * Find the device first to size check it 1855 */ 1856 1857 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1858 retry: 1859 rcu_read_lock(); 1860 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1861 err = -ENODEV; 1862 if (dev == NULL) 1863 goto out_unlock; 1864 1865 err = -ENETDOWN; 1866 if (!(dev->flags & IFF_UP)) 1867 goto out_unlock; 1868 1869 /* 1870 * You may not queue a frame bigger than the mtu. This is the lowest level 1871 * raw protocol and you must do your own fragmentation at this level. 1872 */ 1873 1874 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1875 if (!netif_supports_nofcs(dev)) { 1876 err = -EPROTONOSUPPORT; 1877 goto out_unlock; 1878 } 1879 extra_len = 4; /* We're doing our own CRC */ 1880 } 1881 1882 err = -EMSGSIZE; 1883 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 1884 goto out_unlock; 1885 1886 if (!skb) { 1887 size_t reserved = LL_RESERVED_SPACE(dev); 1888 int tlen = dev->needed_tailroom; 1889 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 1890 1891 rcu_read_unlock(); 1892 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 1893 if (skb == NULL) 1894 return -ENOBUFS; 1895 /* FIXME: Save some space for broken drivers that write a hard 1896 * header at transmission time by themselves. PPP is the notable 1897 * one here. This should really be fixed at the driver level. 
1898 */ 1899 skb_reserve(skb, reserved); 1900 skb_reset_network_header(skb); 1901 1902 /* Try to align data part correctly */ 1903 if (hhlen) { 1904 skb->data -= hhlen; 1905 skb->tail -= hhlen; 1906 if (len < hhlen) 1907 skb_reset_network_header(skb); 1908 } 1909 err = memcpy_from_msg(skb_put(skb, len), msg, len); 1910 if (err) 1911 goto out_free; 1912 goto retry; 1913 } 1914 1915 if (!dev_validate_header(dev, skb->data, len)) { 1916 err = -EINVAL; 1917 goto out_unlock; 1918 } 1919 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 1920 !packet_extra_vlan_len_allowed(dev, skb)) { 1921 err = -EMSGSIZE; 1922 goto out_unlock; 1923 } 1924 1925 sockc.tsflags = sk->sk_tsflags; 1926 if (msg->msg_controllen) { 1927 err = sock_cmsg_send(sk, msg, &sockc); 1928 if (unlikely(err)) 1929 goto out_unlock; 1930 } 1931 1932 skb->protocol = proto; 1933 skb->dev = dev; 1934 skb->priority = sk->sk_priority; 1935 skb->mark = sk->sk_mark; 1936 1937 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); 1938 1939 if (unlikely(extra_len == 4)) 1940 skb->no_fcs = 1; 1941 1942 skb_probe_transport_header(skb, 0); 1943 1944 dev_queue_xmit(skb); 1945 rcu_read_unlock(); 1946 return len; 1947 1948 out_unlock: 1949 rcu_read_unlock(); 1950 out_free: 1951 kfree_skb(skb); 1952 return err; 1953 } 1954 1955 static unsigned int run_filter(struct sk_buff *skb, 1956 const struct sock *sk, 1957 unsigned int res) 1958 { 1959 struct sk_filter *filter; 1960 1961 rcu_read_lock(); 1962 filter = rcu_dereference(sk->sk_filter); 1963 if (filter != NULL) 1964 res = bpf_prog_run_clear_cb(filter->prog, skb); 1965 rcu_read_unlock(); 1966 1967 return res; 1968 } 1969 1970 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 1971 size_t *len) 1972 { 1973 struct virtio_net_hdr vnet_hdr; 1974 1975 if (*len < sizeof(vnet_hdr)) 1976 return -EINVAL; 1977 *len -= sizeof(vnet_hdr); 1978 1979 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) 1980 return -EINVAL; 1981 1982 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 1983 } 1984 1985 /* 1986 * This function makes lazy skb cloning in hope that most of packets 1987 * are discarded by BPF. 1988 * 1989 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 1990 * and skb->cb are mangled. It works because (and until) packets 1991 * falling here are owned by current CPU. Output packets are cloned 1992 * by dev_queue_xmit_nit(), input packets are processed by net_bh 1993 * sequencially, so that if we return skb to original state on exit, 1994 * we will not harm anyone. 1995 */ 1996 1997 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 1998 struct packet_type *pt, struct net_device *orig_dev) 1999 { 2000 struct sock *sk; 2001 struct sockaddr_ll *sll; 2002 struct packet_sock *po; 2003 u8 *skb_head = skb->data; 2004 int skb_len = skb->len; 2005 unsigned int snaplen, res; 2006 bool is_drop_n_account = false; 2007 2008 if (skb->pkt_type == PACKET_LOOPBACK) 2009 goto drop; 2010 2011 sk = pt->af_packet_priv; 2012 po = pkt_sk(sk); 2013 2014 if (!net_eq(dev_net(dev), sock_net(sk))) 2015 goto drop; 2016 2017 skb->dev = dev; 2018 2019 if (dev->header_ops) { 2020 /* The device has an explicit notion of ll header, 2021 * exported to higher levels. 2022 * 2023 * Otherwise, the device hides details of its frame 2024 * structure, so that corresponding packet head is 2025 * never delivered to user. 
2026 */ 2027 if (sk->sk_type != SOCK_DGRAM) 2028 skb_push(skb, skb->data - skb_mac_header(skb)); 2029 else if (skb->pkt_type == PACKET_OUTGOING) { 2030 /* Special case: outgoing packets have ll header at head */ 2031 skb_pull(skb, skb_network_offset(skb)); 2032 } 2033 } 2034 2035 snaplen = skb->len; 2036 2037 res = run_filter(skb, sk, snaplen); 2038 if (!res) 2039 goto drop_n_restore; 2040 if (snaplen > res) 2041 snaplen = res; 2042 2043 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2044 goto drop_n_acct; 2045 2046 if (skb_shared(skb)) { 2047 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2048 if (nskb == NULL) 2049 goto drop_n_acct; 2050 2051 if (skb_head != skb->data) { 2052 skb->data = skb_head; 2053 skb->len = skb_len; 2054 } 2055 consume_skb(skb); 2056 skb = nskb; 2057 } 2058 2059 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2060 2061 sll = &PACKET_SKB_CB(skb)->sa.ll; 2062 sll->sll_hatype = dev->type; 2063 sll->sll_pkttype = skb->pkt_type; 2064 if (unlikely(po->origdev)) 2065 sll->sll_ifindex = orig_dev->ifindex; 2066 else 2067 sll->sll_ifindex = dev->ifindex; 2068 2069 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2070 2071 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2072 * Use their space for storing the original skb length. 2073 */ 2074 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2075 2076 if (pskb_trim(skb, snaplen)) 2077 goto drop_n_acct; 2078 2079 skb_set_owner_r(skb, sk); 2080 skb->dev = NULL; 2081 skb_dst_drop(skb); 2082 2083 /* drop conntrack reference */ 2084 nf_reset(skb); 2085 2086 spin_lock(&sk->sk_receive_queue.lock); 2087 po->stats.stats1.tp_packets++; 2088 sock_skb_set_dropcount(sk, skb); 2089 __skb_queue_tail(&sk->sk_receive_queue, skb); 2090 spin_unlock(&sk->sk_receive_queue.lock); 2091 sk->sk_data_ready(sk); 2092 return 0; 2093 2094 drop_n_acct: 2095 is_drop_n_account = true; 2096 spin_lock(&sk->sk_receive_queue.lock); 2097 po->stats.stats1.tp_drops++; 2098 atomic_inc(&sk->sk_drops); 2099 spin_unlock(&sk->sk_receive_queue.lock); 2100 2101 drop_n_restore: 2102 if (skb_head != skb->data && skb_shared(skb)) { 2103 skb->data = skb_head; 2104 skb->len = skb_len; 2105 } 2106 drop: 2107 if (!is_drop_n_account) 2108 consume_skb(skb); 2109 else 2110 kfree_skb(skb); 2111 return 0; 2112 } 2113 2114 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2115 struct packet_type *pt, struct net_device *orig_dev) 2116 { 2117 struct sock *sk; 2118 struct packet_sock *po; 2119 struct sockaddr_ll *sll; 2120 union tpacket_uhdr h; 2121 u8 *skb_head = skb->data; 2122 int skb_len = skb->len; 2123 unsigned int snaplen, res; 2124 unsigned long status = TP_STATUS_USER; 2125 unsigned short macoff, netoff, hdrlen; 2126 struct sk_buff *copy_skb = NULL; 2127 struct timespec ts; 2128 __u32 ts_status; 2129 bool is_drop_n_account = false; 2130 2131 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2132 * We may add members to them until current aligned size without forcing 2133 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
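 * The BUILD_BUG_ON()s below pin the current aligned sizes (32 bytes for
 * tpacket2_hdr, 48 bytes for tpacket3_hdr); growing either structure past
 * that would be a userspace-visible change.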
2134 */ 2135 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2136 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2137 2138 if (skb->pkt_type == PACKET_LOOPBACK) 2139 goto drop; 2140 2141 sk = pt->af_packet_priv; 2142 po = pkt_sk(sk); 2143 2144 if (!net_eq(dev_net(dev), sock_net(sk))) 2145 goto drop; 2146 2147 if (dev->header_ops) { 2148 if (sk->sk_type != SOCK_DGRAM) 2149 skb_push(skb, skb->data - skb_mac_header(skb)); 2150 else if (skb->pkt_type == PACKET_OUTGOING) { 2151 /* Special case: outgoing packets have ll header at head */ 2152 skb_pull(skb, skb_network_offset(skb)); 2153 } 2154 } 2155 2156 snaplen = skb->len; 2157 2158 res = run_filter(skb, sk, snaplen); 2159 if (!res) 2160 goto drop_n_restore; 2161 2162 if (skb->ip_summed == CHECKSUM_PARTIAL) 2163 status |= TP_STATUS_CSUMNOTREADY; 2164 else if (skb->pkt_type != PACKET_OUTGOING && 2165 (skb->ip_summed == CHECKSUM_COMPLETE || 2166 skb_csum_unnecessary(skb))) 2167 status |= TP_STATUS_CSUM_VALID; 2168 2169 if (snaplen > res) 2170 snaplen = res; 2171 2172 if (sk->sk_type == SOCK_DGRAM) { 2173 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2174 po->tp_reserve; 2175 } else { 2176 unsigned int maclen = skb_network_offset(skb); 2177 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2178 (maclen < 16 ? 16 : maclen)) + 2179 po->tp_reserve; 2180 if (po->has_vnet_hdr) 2181 netoff += sizeof(struct virtio_net_hdr); 2182 macoff = netoff - maclen; 2183 } 2184 if (po->tp_version <= TPACKET_V2) { 2185 if (macoff + snaplen > po->rx_ring.frame_size) { 2186 if (po->copy_thresh && 2187 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2188 if (skb_shared(skb)) { 2189 copy_skb = skb_clone(skb, GFP_ATOMIC); 2190 } else { 2191 copy_skb = skb_get(skb); 2192 skb_head = skb->data; 2193 } 2194 if (copy_skb) 2195 skb_set_owner_r(copy_skb, sk); 2196 } 2197 snaplen = po->rx_ring.frame_size - macoff; 2198 if ((int)snaplen < 0) 2199 snaplen = 0; 2200 } 2201 } else if (unlikely(macoff + snaplen > 2202 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2203 u32 nval; 2204 2205 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2206 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 2207 snaplen, nval, macoff); 2208 snaplen = nval; 2209 if (unlikely((int)snaplen < 0)) { 2210 snaplen = 0; 2211 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2212 } 2213 } 2214 spin_lock(&sk->sk_receive_queue.lock); 2215 h.raw = packet_current_rx_frame(po, skb, 2216 TP_STATUS_KERNEL, (macoff+snaplen)); 2217 if (!h.raw) 2218 goto drop_n_account; 2219 if (po->tp_version <= TPACKET_V2) { 2220 packet_increment_rx_head(po, &po->rx_ring); 2221 /* 2222 * LOSING will be reported till you read the stats, 2223 * because it's COR - Clear On Read. 2224 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2225 * at packet level. 
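 * (Userspace clears the counters, and with them this flag, by reading
 * PACKET_STATISTICS via getsockopt(); see packet_getsockopt() below.)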
2226 */ 2227 if (po->stats.stats1.tp_drops) 2228 status |= TP_STATUS_LOSING; 2229 } 2230 po->stats.stats1.tp_packets++; 2231 if (copy_skb) { 2232 status |= TP_STATUS_COPY; 2233 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2234 } 2235 spin_unlock(&sk->sk_receive_queue.lock); 2236 2237 if (po->has_vnet_hdr) { 2238 if (virtio_net_hdr_from_skb(skb, h.raw + macoff - 2239 sizeof(struct virtio_net_hdr), 2240 vio_le(), true)) { 2241 spin_lock(&sk->sk_receive_queue.lock); 2242 goto drop_n_account; 2243 } 2244 } 2245 2246 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2247 2248 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 2249 getnstimeofday(&ts); 2250 2251 status |= ts_status; 2252 2253 switch (po->tp_version) { 2254 case TPACKET_V1: 2255 h.h1->tp_len = skb->len; 2256 h.h1->tp_snaplen = snaplen; 2257 h.h1->tp_mac = macoff; 2258 h.h1->tp_net = netoff; 2259 h.h1->tp_sec = ts.tv_sec; 2260 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2261 hdrlen = sizeof(*h.h1); 2262 break; 2263 case TPACKET_V2: 2264 h.h2->tp_len = skb->len; 2265 h.h2->tp_snaplen = snaplen; 2266 h.h2->tp_mac = macoff; 2267 h.h2->tp_net = netoff; 2268 h.h2->tp_sec = ts.tv_sec; 2269 h.h2->tp_nsec = ts.tv_nsec; 2270 if (skb_vlan_tag_present(skb)) { 2271 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2272 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2273 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2274 } else { 2275 h.h2->tp_vlan_tci = 0; 2276 h.h2->tp_vlan_tpid = 0; 2277 } 2278 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2279 hdrlen = sizeof(*h.h2); 2280 break; 2281 case TPACKET_V3: 2282 /* tp_nxt_offset,vlan are already populated above. 2283 * So DONT clear those fields here 2284 */ 2285 h.h3->tp_status |= status; 2286 h.h3->tp_len = skb->len; 2287 h.h3->tp_snaplen = snaplen; 2288 h.h3->tp_mac = macoff; 2289 h.h3->tp_net = netoff; 2290 h.h3->tp_sec = ts.tv_sec; 2291 h.h3->tp_nsec = ts.tv_nsec; 2292 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2293 hdrlen = sizeof(*h.h3); 2294 break; 2295 default: 2296 BUG(); 2297 } 2298 2299 sll = h.raw + TPACKET_ALIGN(hdrlen); 2300 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2301 sll->sll_family = AF_PACKET; 2302 sll->sll_hatype = dev->type; 2303 sll->sll_protocol = skb->protocol; 2304 sll->sll_pkttype = skb->pkt_type; 2305 if (unlikely(po->origdev)) 2306 sll->sll_ifindex = orig_dev->ifindex; 2307 else 2308 sll->sll_ifindex = dev->ifindex; 2309 2310 smp_mb(); 2311 2312 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2313 if (po->tp_version <= TPACKET_V2) { 2314 u8 *start, *end; 2315 2316 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2317 macoff + snaplen); 2318 2319 for (start = h.raw; start < end; start += PAGE_SIZE) 2320 flush_dcache_page(pgv_to_page(start)); 2321 } 2322 smp_wmb(); 2323 #endif 2324 2325 if (po->tp_version <= TPACKET_V2) { 2326 __packet_set_status(po, h.raw, status); 2327 sk->sk_data_ready(sk); 2328 } else { 2329 prb_clear_blk_fill_status(&po->rx_ring); 2330 } 2331 2332 drop_n_restore: 2333 if (skb_head != skb->data && skb_shared(skb)) { 2334 skb->data = skb_head; 2335 skb->len = skb_len; 2336 } 2337 drop: 2338 if (!is_drop_n_account) 2339 consume_skb(skb); 2340 else 2341 kfree_skb(skb); 2342 return 0; 2343 2344 drop_n_account: 2345 is_drop_n_account = true; 2346 po->stats.stats1.tp_drops++; 2347 spin_unlock(&sk->sk_receive_queue.lock); 2348 2349 sk->sk_data_ready(sk); 2350 kfree_skb(copy_skb); 2351 goto drop_n_restore; 2352 } 2353 2354 static void tpacket_destruct_skb(struct sk_buff *skb) 2355 { 2356 struct 
packet_sock *po = pkt_sk(skb->sk); 2357 2358 if (likely(po->tx_ring.pg_vec)) { 2359 void *ph; 2360 __u32 ts; 2361 2362 ph = skb_shinfo(skb)->destructor_arg; 2363 packet_dec_pending(&po->tx_ring); 2364 2365 ts = __packet_set_timestamp(po, ph, skb); 2366 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2367 } 2368 2369 sock_wfree(skb); 2370 } 2371 2372 static void tpacket_set_protocol(const struct net_device *dev, 2373 struct sk_buff *skb) 2374 { 2375 if (dev->type == ARPHRD_ETHER) { 2376 skb_reset_mac_header(skb); 2377 skb->protocol = eth_hdr(skb)->h_proto; 2378 } 2379 } 2380 2381 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2382 { 2383 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2384 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2385 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2386 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2387 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2388 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2389 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2390 2391 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2392 return -EINVAL; 2393 2394 return 0; 2395 } 2396 2397 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2398 struct virtio_net_hdr *vnet_hdr) 2399 { 2400 if (*len < sizeof(*vnet_hdr)) 2401 return -EINVAL; 2402 *len -= sizeof(*vnet_hdr); 2403 2404 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2405 return -EFAULT; 2406 2407 return __packet_snd_vnet_parse(vnet_hdr, *len); 2408 } 2409 2410 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2411 void *frame, struct net_device *dev, void *data, int tp_len, 2412 __be16 proto, unsigned char *addr, int hlen, int copylen, 2413 const struct sockcm_cookie *sockc) 2414 { 2415 union tpacket_uhdr ph; 2416 int to_write, offset, len, nr_frags, len_max; 2417 struct socket *sock = po->sk.sk_socket; 2418 struct page *page; 2419 int err; 2420 2421 ph.raw = frame; 2422 2423 skb->protocol = proto; 2424 skb->dev = dev; 2425 skb->priority = po->sk.sk_priority; 2426 skb->mark = po->sk.sk_mark; 2427 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 2428 skb_shinfo(skb)->destructor_arg = ph.raw; 2429 2430 skb_reserve(skb, hlen); 2431 skb_reset_network_header(skb); 2432 2433 to_write = tp_len; 2434 2435 if (sock->type == SOCK_DGRAM) { 2436 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2437 NULL, tp_len); 2438 if (unlikely(err < 0)) 2439 return -EINVAL; 2440 } else if (copylen) { 2441 int hdrlen = min_t(int, copylen, tp_len); 2442 2443 skb_push(skb, dev->hard_header_len); 2444 skb_put(skb, copylen - dev->hard_header_len); 2445 err = skb_store_bits(skb, 0, data, hdrlen); 2446 if (unlikely(err)) 2447 return err; 2448 if (!dev_validate_header(dev, skb->data, hdrlen)) 2449 return -EINVAL; 2450 if (!skb->protocol) 2451 tpacket_set_protocol(dev, skb); 2452 2453 data += hdrlen; 2454 to_write -= hdrlen; 2455 } 2456 2457 offset = offset_in_page(data); 2458 len_max = PAGE_SIZE - offset; 2459 len = ((to_write > len_max) ? 
len_max : to_write); 2460 2461 skb->data_len = to_write; 2462 skb->len += to_write; 2463 skb->truesize += to_write; 2464 atomic_add(to_write, &po->sk.sk_wmem_alloc); 2465 2466 while (likely(to_write)) { 2467 nr_frags = skb_shinfo(skb)->nr_frags; 2468 2469 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2470 pr_err("Packet exceed the number of skb frags(%lu)\n", 2471 MAX_SKB_FRAGS); 2472 return -EFAULT; 2473 } 2474 2475 page = pgv_to_page(data); 2476 data += len; 2477 flush_dcache_page(page); 2478 get_page(page); 2479 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2480 to_write -= len; 2481 offset = 0; 2482 len_max = PAGE_SIZE; 2483 len = ((to_write > len_max) ? len_max : to_write); 2484 } 2485 2486 skb_probe_transport_header(skb, 0); 2487 2488 return tp_len; 2489 } 2490 2491 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2492 int size_max, void **data) 2493 { 2494 union tpacket_uhdr ph; 2495 int tp_len, off; 2496 2497 ph.raw = frame; 2498 2499 switch (po->tp_version) { 2500 case TPACKET_V2: 2501 tp_len = ph.h2->tp_len; 2502 break; 2503 default: 2504 tp_len = ph.h1->tp_len; 2505 break; 2506 } 2507 if (unlikely(tp_len > size_max)) { 2508 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2509 return -EMSGSIZE; 2510 } 2511 2512 if (unlikely(po->tp_tx_has_off)) { 2513 int off_min, off_max; 2514 2515 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2516 off_max = po->tx_ring.frame_size - tp_len; 2517 if (po->sk.sk_type == SOCK_DGRAM) { 2518 switch (po->tp_version) { 2519 case TPACKET_V2: 2520 off = ph.h2->tp_net; 2521 break; 2522 default: 2523 off = ph.h1->tp_net; 2524 break; 2525 } 2526 } else { 2527 switch (po->tp_version) { 2528 case TPACKET_V2: 2529 off = ph.h2->tp_mac; 2530 break; 2531 default: 2532 off = ph.h1->tp_mac; 2533 break; 2534 } 2535 } 2536 if (unlikely((off < off_min) || (off_max < off))) 2537 return -EINVAL; 2538 } else { 2539 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2540 } 2541 2542 *data = frame + off; 2543 return tp_len; 2544 } 2545 2546 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2547 { 2548 struct sk_buff *skb; 2549 struct net_device *dev; 2550 struct virtio_net_hdr *vnet_hdr = NULL; 2551 struct sockcm_cookie sockc; 2552 __be16 proto; 2553 int err, reserve = 0; 2554 void *ph; 2555 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2556 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2557 int tp_len, size_max; 2558 unsigned char *addr; 2559 void *data; 2560 int len_sum = 0; 2561 int status = TP_STATUS_AVAILABLE; 2562 int hlen, tlen, copylen = 0; 2563 2564 mutex_lock(&po->pg_vec_lock); 2565 2566 if (likely(saddr == NULL)) { 2567 dev = packet_cached_dev_get(po); 2568 proto = po->num; 2569 addr = NULL; 2570 } else { 2571 err = -EINVAL; 2572 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2573 goto out; 2574 if (msg->msg_namelen < (saddr->sll_halen 2575 + offsetof(struct sockaddr_ll, 2576 sll_addr))) 2577 goto out; 2578 proto = saddr->sll_protocol; 2579 addr = saddr->sll_addr; 2580 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2581 } 2582 2583 sockc.tsflags = po->sk.sk_tsflags; 2584 if (msg->msg_controllen) { 2585 err = sock_cmsg_send(&po->sk, msg, &sockc); 2586 if (unlikely(err)) 2587 goto out; 2588 } 2589 2590 err = -ENXIO; 2591 if (unlikely(dev == NULL)) 2592 goto out; 2593 err = -ENETDOWN; 2594 if (unlikely(!(dev->flags & IFF_UP))) 2595 goto out_put; 2596 2597 if (po->sk.sk_socket->type == SOCK_RAW) 2598 reserve = dev->hard_header_len; 2599 size_max = 
po->tx_ring.frame_size 2600 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2601 2602 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2603 size_max = dev->mtu + reserve + VLAN_HLEN; 2604 2605 do { 2606 ph = packet_current_frame(po, &po->tx_ring, 2607 TP_STATUS_SEND_REQUEST); 2608 if (unlikely(ph == NULL)) { 2609 if (need_wait && need_resched()) 2610 schedule(); 2611 continue; 2612 } 2613 2614 skb = NULL; 2615 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2616 if (tp_len < 0) 2617 goto tpacket_error; 2618 2619 status = TP_STATUS_SEND_REQUEST; 2620 hlen = LL_RESERVED_SPACE(dev); 2621 tlen = dev->needed_tailroom; 2622 if (po->has_vnet_hdr) { 2623 vnet_hdr = data; 2624 data += sizeof(*vnet_hdr); 2625 tp_len -= sizeof(*vnet_hdr); 2626 if (tp_len < 0 || 2627 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2628 tp_len = -EINVAL; 2629 goto tpacket_error; 2630 } 2631 copylen = __virtio16_to_cpu(vio_le(), 2632 vnet_hdr->hdr_len); 2633 } 2634 copylen = max_t(int, copylen, dev->hard_header_len); 2635 skb = sock_alloc_send_skb(&po->sk, 2636 hlen + tlen + sizeof(struct sockaddr_ll) + 2637 (copylen - dev->hard_header_len), 2638 !need_wait, &err); 2639 2640 if (unlikely(skb == NULL)) { 2641 /* we assume the socket was initially writeable ... */ 2642 if (likely(len_sum > 0)) 2643 err = len_sum; 2644 goto out_status; 2645 } 2646 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2647 addr, hlen, copylen, &sockc); 2648 if (likely(tp_len >= 0) && 2649 tp_len > dev->mtu + reserve && 2650 !po->has_vnet_hdr && 2651 !packet_extra_vlan_len_allowed(dev, skb)) 2652 tp_len = -EMSGSIZE; 2653 2654 if (unlikely(tp_len < 0)) { 2655 tpacket_error: 2656 if (po->tp_loss) { 2657 __packet_set_status(po, ph, 2658 TP_STATUS_AVAILABLE); 2659 packet_increment_head(&po->tx_ring); 2660 kfree_skb(skb); 2661 continue; 2662 } else { 2663 status = TP_STATUS_WRONG_FORMAT; 2664 err = tp_len; 2665 goto out_status; 2666 } 2667 } 2668 2669 if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, 2670 vio_le())) { 2671 tp_len = -EINVAL; 2672 goto tpacket_error; 2673 } 2674 2675 packet_pick_tx_queue(dev, skb); 2676 2677 skb->destructor = tpacket_destruct_skb; 2678 __packet_set_status(po, ph, TP_STATUS_SENDING); 2679 packet_inc_pending(&po->tx_ring); 2680 2681 status = TP_STATUS_SEND_REQUEST; 2682 err = po->xmit(skb); 2683 if (unlikely(err > 0)) { 2684 err = net_xmit_errno(err); 2685 if (err && __packet_get_status(po, ph) == 2686 TP_STATUS_AVAILABLE) { 2687 /* skb was destructed already */ 2688 skb = NULL; 2689 goto out_status; 2690 } 2691 /* 2692 * skb was dropped but not destructed yet; 2693 * let's treat it like congestion or err < 0 2694 */ 2695 err = 0; 2696 } 2697 packet_increment_head(&po->tx_ring); 2698 len_sum += tp_len; 2699 } while (likely((ph != NULL) || 2700 /* Note: packet_read_pending() might be slow if we have 2701 * to call it as it's per_cpu variable, but in fast-path 2702 * we already short-circuit the loop with the first 2703 * condition, and luckily don't have to go that path 2704 * anyway. 
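 * (That is, packet_read_pending() is only consulted once the current
 * frame is no longer in TP_STATUS_SEND_REQUEST and MSG_DONTWAIT was
 * not given.)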
2705 */ 2706 (need_wait && packet_read_pending(&po->tx_ring)))); 2707 2708 err = len_sum; 2709 goto out_put; 2710 2711 out_status: 2712 __packet_set_status(po, ph, status); 2713 kfree_skb(skb); 2714 out_put: 2715 dev_put(dev); 2716 out: 2717 mutex_unlock(&po->pg_vec_lock); 2718 return err; 2719 } 2720 2721 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2722 size_t reserve, size_t len, 2723 size_t linear, int noblock, 2724 int *err) 2725 { 2726 struct sk_buff *skb; 2727 2728 /* Under a page? Don't bother with paged skb. */ 2729 if (prepad + len < PAGE_SIZE || !linear) 2730 linear = len; 2731 2732 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2733 err, 0); 2734 if (!skb) 2735 return NULL; 2736 2737 skb_reserve(skb, reserve); 2738 skb_put(skb, linear); 2739 skb->data_len = len - linear; 2740 skb->len += len - linear; 2741 2742 return skb; 2743 } 2744 2745 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2746 { 2747 struct sock *sk = sock->sk; 2748 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2749 struct sk_buff *skb; 2750 struct net_device *dev; 2751 __be16 proto; 2752 unsigned char *addr; 2753 int err, reserve = 0; 2754 struct sockcm_cookie sockc; 2755 struct virtio_net_hdr vnet_hdr = { 0 }; 2756 int offset = 0; 2757 struct packet_sock *po = pkt_sk(sk); 2758 int hlen, tlen, linear; 2759 int extra_len = 0; 2760 2761 /* 2762 * Get and verify the address. 2763 */ 2764 2765 if (likely(saddr == NULL)) { 2766 dev = packet_cached_dev_get(po); 2767 proto = po->num; 2768 addr = NULL; 2769 } else { 2770 err = -EINVAL; 2771 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2772 goto out; 2773 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2774 goto out; 2775 proto = saddr->sll_protocol; 2776 addr = saddr->sll_addr; 2777 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2778 } 2779 2780 err = -ENXIO; 2781 if (unlikely(dev == NULL)) 2782 goto out_unlock; 2783 err = -ENETDOWN; 2784 if (unlikely(!(dev->flags & IFF_UP))) 2785 goto out_unlock; 2786 2787 sockc.tsflags = sk->sk_tsflags; 2788 sockc.mark = sk->sk_mark; 2789 if (msg->msg_controllen) { 2790 err = sock_cmsg_send(sk, msg, &sockc); 2791 if (unlikely(err)) 2792 goto out_unlock; 2793 } 2794 2795 if (sock->type == SOCK_RAW) 2796 reserve = dev->hard_header_len; 2797 if (po->has_vnet_hdr) { 2798 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2799 if (err) 2800 goto out_unlock; 2801 } 2802 2803 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2804 if (!netif_supports_nofcs(dev)) { 2805 err = -EPROTONOSUPPORT; 2806 goto out_unlock; 2807 } 2808 extra_len = 4; /* We're doing our own CRC */ 2809 } 2810 2811 err = -EMSGSIZE; 2812 if (!vnet_hdr.gso_type && 2813 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2814 goto out_unlock; 2815 2816 err = -ENOBUFS; 2817 hlen = LL_RESERVED_SPACE(dev); 2818 tlen = dev->needed_tailroom; 2819 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2820 linear = max(linear, min_t(int, len, dev->hard_header_len)); 2821 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2822 msg->msg_flags & MSG_DONTWAIT, &err); 2823 if (skb == NULL) 2824 goto out_unlock; 2825 2826 skb_set_network_header(skb, reserve); 2827 2828 err = -EINVAL; 2829 if (sock->type == SOCK_DGRAM) { 2830 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2831 if (unlikely(offset < 0)) 2832 goto out_free; 2833 } 2834 2835 /* Returns -EFAULT on error */ 2836 err = skb_copy_datagram_from_iter(skb, offset, 
&msg->msg_iter, len); 2837 if (err) 2838 goto out_free; 2839 2840 if (sock->type == SOCK_RAW && 2841 !dev_validate_header(dev, skb->data, len)) { 2842 err = -EINVAL; 2843 goto out_free; 2844 } 2845 2846 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); 2847 2848 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 2849 !packet_extra_vlan_len_allowed(dev, skb)) { 2850 err = -EMSGSIZE; 2851 goto out_free; 2852 } 2853 2854 skb->protocol = proto; 2855 skb->dev = dev; 2856 skb->priority = sk->sk_priority; 2857 skb->mark = sockc.mark; 2858 2859 packet_pick_tx_queue(dev, skb); 2860 2861 if (po->has_vnet_hdr) { 2862 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2863 if (err) 2864 goto out_free; 2865 len += sizeof(vnet_hdr); 2866 } 2867 2868 skb_probe_transport_header(skb, reserve); 2869 2870 if (unlikely(extra_len == 4)) 2871 skb->no_fcs = 1; 2872 2873 err = po->xmit(skb); 2874 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2875 goto out_unlock; 2876 2877 dev_put(dev); 2878 2879 return len; 2880 2881 out_free: 2882 kfree_skb(skb); 2883 out_unlock: 2884 if (dev) 2885 dev_put(dev); 2886 out: 2887 return err; 2888 } 2889 2890 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 2891 { 2892 struct sock *sk = sock->sk; 2893 struct packet_sock *po = pkt_sk(sk); 2894 2895 if (po->tx_ring.pg_vec) 2896 return tpacket_snd(po, msg); 2897 else 2898 return packet_snd(sock, msg, len); 2899 } 2900 2901 /* 2902 * Close a PACKET socket. This is fairly simple. We immediately go 2903 * to 'closed' state and remove our protocol entry in the device list. 2904 */ 2905 2906 static int packet_release(struct socket *sock) 2907 { 2908 struct sock *sk = sock->sk; 2909 struct packet_sock *po; 2910 struct net *net; 2911 union tpacket_req_u req_u; 2912 2913 if (!sk) 2914 return 0; 2915 2916 net = sock_net(sk); 2917 po = pkt_sk(sk); 2918 2919 mutex_lock(&net->packet.sklist_lock); 2920 sk_del_node_init_rcu(sk); 2921 mutex_unlock(&net->packet.sklist_lock); 2922 2923 preempt_disable(); 2924 sock_prot_inuse_add(net, sk->sk_prot, -1); 2925 preempt_enable(); 2926 2927 spin_lock(&po->bind_lock); 2928 unregister_prot_hook(sk, false); 2929 packet_cached_dev_reset(po); 2930 2931 if (po->prot_hook.dev) { 2932 dev_put(po->prot_hook.dev); 2933 po->prot_hook.dev = NULL; 2934 } 2935 spin_unlock(&po->bind_lock); 2936 2937 packet_flush_mclist(sk); 2938 2939 if (po->rx_ring.pg_vec) { 2940 memset(&req_u, 0, sizeof(req_u)); 2941 packet_set_ring(sk, &req_u, 1, 0); 2942 } 2943 2944 if (po->tx_ring.pg_vec) { 2945 memset(&req_u, 0, sizeof(req_u)); 2946 packet_set_ring(sk, &req_u, 1, 1); 2947 } 2948 2949 fanout_release(sk); 2950 2951 synchronize_net(); 2952 /* 2953 * Now the socket is dead. No more input will appear. 2954 */ 2955 sock_orphan(sk); 2956 sock->sk = NULL; 2957 2958 /* Purge queues */ 2959 2960 skb_queue_purge(&sk->sk_receive_queue); 2961 packet_free_pending(po); 2962 sk_refcnt_debug_release(sk); 2963 2964 sock_put(sk); 2965 return 0; 2966 } 2967 2968 /* 2969 * Attach a packet hook. 
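 * packet_do_bind() below re-points the socket's prot_hook at a new
 * (protocol, device) pair, unregistering the old hook first if the socket
 * was already running.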
2970 */ 2971 2972 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 2973 __be16 proto) 2974 { 2975 struct packet_sock *po = pkt_sk(sk); 2976 struct net_device *dev_curr; 2977 __be16 proto_curr; 2978 bool need_rehook; 2979 struct net_device *dev = NULL; 2980 int ret = 0; 2981 bool unlisted = false; 2982 2983 if (po->fanout) 2984 return -EINVAL; 2985 2986 lock_sock(sk); 2987 spin_lock(&po->bind_lock); 2988 rcu_read_lock(); 2989 2990 if (name) { 2991 dev = dev_get_by_name_rcu(sock_net(sk), name); 2992 if (!dev) { 2993 ret = -ENODEV; 2994 goto out_unlock; 2995 } 2996 } else if (ifindex) { 2997 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 2998 if (!dev) { 2999 ret = -ENODEV; 3000 goto out_unlock; 3001 } 3002 } 3003 3004 if (dev) 3005 dev_hold(dev); 3006 3007 proto_curr = po->prot_hook.type; 3008 dev_curr = po->prot_hook.dev; 3009 3010 need_rehook = proto_curr != proto || dev_curr != dev; 3011 3012 if (need_rehook) { 3013 if (po->running) { 3014 rcu_read_unlock(); 3015 __unregister_prot_hook(sk, true); 3016 rcu_read_lock(); 3017 dev_curr = po->prot_hook.dev; 3018 if (dev) 3019 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3020 dev->ifindex); 3021 } 3022 3023 po->num = proto; 3024 po->prot_hook.type = proto; 3025 3026 if (unlikely(unlisted)) { 3027 dev_put(dev); 3028 po->prot_hook.dev = NULL; 3029 po->ifindex = -1; 3030 packet_cached_dev_reset(po); 3031 } else { 3032 po->prot_hook.dev = dev; 3033 po->ifindex = dev ? dev->ifindex : 0; 3034 packet_cached_dev_assign(po, dev); 3035 } 3036 } 3037 if (dev_curr) 3038 dev_put(dev_curr); 3039 3040 if (proto == 0 || !need_rehook) 3041 goto out_unlock; 3042 3043 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3044 register_prot_hook(sk); 3045 } else { 3046 sk->sk_err = ENETDOWN; 3047 if (!sock_flag(sk, SOCK_DEAD)) 3048 sk->sk_error_report(sk); 3049 } 3050 3051 out_unlock: 3052 rcu_read_unlock(); 3053 spin_unlock(&po->bind_lock); 3054 release_sock(sk); 3055 return ret; 3056 } 3057 3058 /* 3059 * Bind a packet socket to a device 3060 */ 3061 3062 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3063 int addr_len) 3064 { 3065 struct sock *sk = sock->sk; 3066 char name[15]; 3067 3068 /* 3069 * Check legality 3070 */ 3071 3072 if (addr_len != sizeof(struct sockaddr)) 3073 return -EINVAL; 3074 strlcpy(name, uaddr->sa_data, sizeof(name)); 3075 3076 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3077 } 3078 3079 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3080 { 3081 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3082 struct sock *sk = sock->sk; 3083 3084 /* 3085 * Check legality 3086 */ 3087 3088 if (addr_len < sizeof(struct sockaddr_ll)) 3089 return -EINVAL; 3090 if (sll->sll_family != AF_PACKET) 3091 return -EINVAL; 3092 3093 return packet_do_bind(sk, NULL, sll->sll_ifindex, 3094 sll->sll_protocol ? : pkt_sk(sk)->num); 3095 } 3096 3097 static struct proto packet_proto = { 3098 .name = "PACKET", 3099 .owner = THIS_MODULE, 3100 .obj_size = sizeof(struct packet_sock), 3101 }; 3102 3103 /* 3104 * Create a packet of type SOCK_PACKET. 
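 * (SOCK_RAW and SOCK_DGRAM sockets are created here as well; for example a
 * plain socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)) call ends up in this
 * function. Only the proto_ops and the receive callback chosen below
 * differ between the three types.)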
3105 */ 3106 3107 static int packet_create(struct net *net, struct socket *sock, int protocol, 3108 int kern) 3109 { 3110 struct sock *sk; 3111 struct packet_sock *po; 3112 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3113 int err; 3114 3115 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3116 return -EPERM; 3117 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3118 sock->type != SOCK_PACKET) 3119 return -ESOCKTNOSUPPORT; 3120 3121 sock->state = SS_UNCONNECTED; 3122 3123 err = -ENOBUFS; 3124 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3125 if (sk == NULL) 3126 goto out; 3127 3128 sock->ops = &packet_ops; 3129 if (sock->type == SOCK_PACKET) 3130 sock->ops = &packet_ops_spkt; 3131 3132 sock_init_data(sock, sk); 3133 3134 po = pkt_sk(sk); 3135 sk->sk_family = PF_PACKET; 3136 po->num = proto; 3137 po->xmit = dev_queue_xmit; 3138 3139 err = packet_alloc_pending(po); 3140 if (err) 3141 goto out2; 3142 3143 packet_cached_dev_reset(po); 3144 3145 sk->sk_destruct = packet_sock_destruct; 3146 sk_refcnt_debug_inc(sk); 3147 3148 /* 3149 * Attach a protocol block 3150 */ 3151 3152 spin_lock_init(&po->bind_lock); 3153 mutex_init(&po->pg_vec_lock); 3154 po->rollover = NULL; 3155 po->prot_hook.func = packet_rcv; 3156 3157 if (sock->type == SOCK_PACKET) 3158 po->prot_hook.func = packet_rcv_spkt; 3159 3160 po->prot_hook.af_packet_priv = sk; 3161 3162 if (proto) { 3163 po->prot_hook.type = proto; 3164 register_prot_hook(sk); 3165 } 3166 3167 mutex_lock(&net->packet.sklist_lock); 3168 sk_add_node_rcu(sk, &net->packet.sklist); 3169 mutex_unlock(&net->packet.sklist_lock); 3170 3171 preempt_disable(); 3172 sock_prot_inuse_add(net, &packet_proto, 1); 3173 preempt_enable(); 3174 3175 return 0; 3176 out2: 3177 sk_free(sk); 3178 out: 3179 return err; 3180 } 3181 3182 /* 3183 * Pull a packet from our receive queue and hand it to the user. 3184 * If necessary we block. 3185 */ 3186 3187 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3188 int flags) 3189 { 3190 struct sock *sk = sock->sk; 3191 struct sk_buff *skb; 3192 int copied, err; 3193 int vnet_hdr_len = 0; 3194 unsigned int origlen = 0; 3195 3196 err = -EINVAL; 3197 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3198 goto out; 3199 3200 #if 0 3201 /* What error should we return now? EUNATTACH? */ 3202 if (pkt_sk(sk)->ifindex < 0) 3203 return -ENODEV; 3204 #endif 3205 3206 if (flags & MSG_ERRQUEUE) { 3207 err = sock_recv_errqueue(sk, msg, len, 3208 SOL_PACKET, PACKET_TX_TIMESTAMP); 3209 goto out; 3210 } 3211 3212 /* 3213 * Call the generic datagram receiver. This handles all sorts 3214 * of horrible races and re-entrancy so we can forget about it 3215 * in the protocol layers. 3216 * 3217 * Now it will return ENETDOWN, if device have just gone down, 3218 * but then it will block. 3219 */ 3220 3221 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3222 3223 /* 3224 * An error occurred so return it. Because skb_recv_datagram() 3225 * handles the blocking we don't see and worry about blocking 3226 * retries. 3227 */ 3228 3229 if (skb == NULL) 3230 goto out; 3231 3232 if (pkt_sk(sk)->pressure) 3233 packet_rcv_has_room(pkt_sk(sk), NULL); 3234 3235 if (pkt_sk(sk)->has_vnet_hdr) { 3236 err = packet_rcv_vnet(msg, skb, &len); 3237 if (err) 3238 goto out_free; 3239 vnet_hdr_len = sizeof(struct virtio_net_hdr); 3240 } 3241 3242 /* You lose any data beyond the buffer you gave. 
If it worries 3243 * a user program they can ask the device for its MTU 3244 * anyway. 3245 */ 3246 copied = skb->len; 3247 if (copied > len) { 3248 copied = len; 3249 msg->msg_flags |= MSG_TRUNC; 3250 } 3251 3252 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3253 if (err) 3254 goto out_free; 3255 3256 if (sock->type != SOCK_PACKET) { 3257 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3258 3259 /* Original length was stored in sockaddr_ll fields */ 3260 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3261 sll->sll_family = AF_PACKET; 3262 sll->sll_protocol = skb->protocol; 3263 } 3264 3265 sock_recv_ts_and_drops(msg, sk, skb); 3266 3267 if (msg->msg_name) { 3268 /* If the address length field is there to be filled 3269 * in, we fill it in now. 3270 */ 3271 if (sock->type == SOCK_PACKET) { 3272 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3273 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3274 } else { 3275 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3276 3277 msg->msg_namelen = sll->sll_halen + 3278 offsetof(struct sockaddr_ll, sll_addr); 3279 } 3280 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 3281 msg->msg_namelen); 3282 } 3283 3284 if (pkt_sk(sk)->auxdata) { 3285 struct tpacket_auxdata aux; 3286 3287 aux.tp_status = TP_STATUS_USER; 3288 if (skb->ip_summed == CHECKSUM_PARTIAL) 3289 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3290 else if (skb->pkt_type != PACKET_OUTGOING && 3291 (skb->ip_summed == CHECKSUM_COMPLETE || 3292 skb_csum_unnecessary(skb))) 3293 aux.tp_status |= TP_STATUS_CSUM_VALID; 3294 3295 aux.tp_len = origlen; 3296 aux.tp_snaplen = skb->len; 3297 aux.tp_mac = 0; 3298 aux.tp_net = skb_network_offset(skb); 3299 if (skb_vlan_tag_present(skb)) { 3300 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3301 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3302 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3303 } else { 3304 aux.tp_vlan_tci = 0; 3305 aux.tp_vlan_tpid = 0; 3306 } 3307 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3308 } 3309 3310 /* 3311 * Free or return the buffer as appropriate. Again this 3312 * hides all the races and re-entrancy issues from us. 3313 */ 3314 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3315 3316 out_free: 3317 skb_free_datagram(sk, skb); 3318 out: 3319 return err; 3320 } 3321 3322 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3323 int *uaddr_len, int peer) 3324 { 3325 struct net_device *dev; 3326 struct sock *sk = sock->sk; 3327 3328 if (peer) 3329 return -EOPNOTSUPP; 3330 3331 uaddr->sa_family = AF_PACKET; 3332 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3333 rcu_read_lock(); 3334 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3335 if (dev) 3336 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3337 rcu_read_unlock(); 3338 *uaddr_len = sizeof(*uaddr); 3339 3340 return 0; 3341 } 3342 3343 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3344 int *uaddr_len, int peer) 3345 { 3346 struct net_device *dev; 3347 struct sock *sk = sock->sk; 3348 struct packet_sock *po = pkt_sk(sk); 3349 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3350 3351 if (peer) 3352 return -EOPNOTSUPP; 3353 3354 sll->sll_family = AF_PACKET; 3355 sll->sll_ifindex = po->ifindex; 3356 sll->sll_protocol = po->num; 3357 sll->sll_pkttype = 0; 3358 rcu_read_lock(); 3359 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3360 if (dev) { 3361 sll->sll_hatype = dev->type; 3362 sll->sll_halen = dev->addr_len; 3363 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3364 } else { 3365 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3366 sll->sll_halen = 0; 3367 } 3368 rcu_read_unlock(); 3369 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3370 3371 return 0; 3372 } 3373 3374 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3375 int what) 3376 { 3377 switch (i->type) { 3378 case PACKET_MR_MULTICAST: 3379 if (i->alen != dev->addr_len) 3380 return -EINVAL; 3381 if (what > 0) 3382 return dev_mc_add(dev, i->addr); 3383 else 3384 return dev_mc_del(dev, i->addr); 3385 break; 3386 case PACKET_MR_PROMISC: 3387 return dev_set_promiscuity(dev, what); 3388 case PACKET_MR_ALLMULTI: 3389 return dev_set_allmulti(dev, what); 3390 case PACKET_MR_UNICAST: 3391 if (i->alen != dev->addr_len) 3392 return -EINVAL; 3393 if (what > 0) 3394 return dev_uc_add(dev, i->addr); 3395 else 3396 return dev_uc_del(dev, i->addr); 3397 break; 3398 default: 3399 break; 3400 } 3401 return 0; 3402 } 3403 3404 static void packet_dev_mclist_delete(struct net_device *dev, 3405 struct packet_mclist **mlp) 3406 { 3407 struct packet_mclist *ml; 3408 3409 while ((ml = *mlp) != NULL) { 3410 if (ml->ifindex == dev->ifindex) { 3411 packet_dev_mc(dev, ml, -1); 3412 *mlp = ml->next; 3413 kfree(ml); 3414 } else 3415 mlp = &ml->next; 3416 } 3417 } 3418 3419 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3420 { 3421 struct packet_sock *po = pkt_sk(sk); 3422 struct packet_mclist *ml, *i; 3423 struct net_device *dev; 3424 int err; 3425 3426 rtnl_lock(); 3427 3428 err = -ENODEV; 3429 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3430 if (!dev) 3431 goto done; 3432 3433 err = -EINVAL; 3434 if (mreq->mr_alen > dev->addr_len) 3435 goto done; 3436 3437 err = -ENOBUFS; 3438 i = kmalloc(sizeof(*i), GFP_KERNEL); 3439 if (i == NULL) 3440 goto done; 3441 3442 err = 0; 3443 for (ml = po->mclist; ml; ml = ml->next) { 3444 if (ml->ifindex == mreq->mr_ifindex && 3445 ml->type == mreq->mr_type && 3446 ml->alen == mreq->mr_alen && 3447 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3448 ml->count++; 3449 /* Free the new element ... 
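 * (we only bumped the reference count of the existing entry above)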
*/ 3450 kfree(i); 3451 goto done; 3452 } 3453 } 3454 3455 i->type = mreq->mr_type; 3456 i->ifindex = mreq->mr_ifindex; 3457 i->alen = mreq->mr_alen; 3458 memcpy(i->addr, mreq->mr_address, i->alen); 3459 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3460 i->count = 1; 3461 i->next = po->mclist; 3462 po->mclist = i; 3463 err = packet_dev_mc(dev, i, 1); 3464 if (err) { 3465 po->mclist = i->next; 3466 kfree(i); 3467 } 3468 3469 done: 3470 rtnl_unlock(); 3471 return err; 3472 } 3473 3474 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3475 { 3476 struct packet_mclist *ml, **mlp; 3477 3478 rtnl_lock(); 3479 3480 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3481 if (ml->ifindex == mreq->mr_ifindex && 3482 ml->type == mreq->mr_type && 3483 ml->alen == mreq->mr_alen && 3484 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3485 if (--ml->count == 0) { 3486 struct net_device *dev; 3487 *mlp = ml->next; 3488 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3489 if (dev) 3490 packet_dev_mc(dev, ml, -1); 3491 kfree(ml); 3492 } 3493 break; 3494 } 3495 } 3496 rtnl_unlock(); 3497 return 0; 3498 } 3499 3500 static void packet_flush_mclist(struct sock *sk) 3501 { 3502 struct packet_sock *po = pkt_sk(sk); 3503 struct packet_mclist *ml; 3504 3505 if (!po->mclist) 3506 return; 3507 3508 rtnl_lock(); 3509 while ((ml = po->mclist) != NULL) { 3510 struct net_device *dev; 3511 3512 po->mclist = ml->next; 3513 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3514 if (dev != NULL) 3515 packet_dev_mc(dev, ml, -1); 3516 kfree(ml); 3517 } 3518 rtnl_unlock(); 3519 } 3520 3521 static int 3522 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 3523 { 3524 struct sock *sk = sock->sk; 3525 struct packet_sock *po = pkt_sk(sk); 3526 int ret; 3527 3528 if (level != SOL_PACKET) 3529 return -ENOPROTOOPT; 3530 3531 switch (optname) { 3532 case PACKET_ADD_MEMBERSHIP: 3533 case PACKET_DROP_MEMBERSHIP: 3534 { 3535 struct packet_mreq_max mreq; 3536 int len = optlen; 3537 memset(&mreq, 0, sizeof(mreq)); 3538 if (len < sizeof(struct packet_mreq)) 3539 return -EINVAL; 3540 if (len > sizeof(mreq)) 3541 len = sizeof(mreq); 3542 if (copy_from_user(&mreq, optval, len)) 3543 return -EFAULT; 3544 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3545 return -EINVAL; 3546 if (optname == PACKET_ADD_MEMBERSHIP) 3547 ret = packet_mc_add(sk, &mreq); 3548 else 3549 ret = packet_mc_drop(sk, &mreq); 3550 return ret; 3551 } 3552 3553 case PACKET_RX_RING: 3554 case PACKET_TX_RING: 3555 { 3556 union tpacket_req_u req_u; 3557 int len; 3558 3559 switch (po->tp_version) { 3560 case TPACKET_V1: 3561 case TPACKET_V2: 3562 len = sizeof(req_u.req); 3563 break; 3564 case TPACKET_V3: 3565 default: 3566 len = sizeof(req_u.req3); 3567 break; 3568 } 3569 if (optlen < len) 3570 return -EINVAL; 3571 if (copy_from_user(&req_u.req, optval, len)) 3572 return -EFAULT; 3573 return packet_set_ring(sk, &req_u, 0, 3574 optname == PACKET_TX_RING); 3575 } 3576 case PACKET_COPY_THRESH: 3577 { 3578 int val; 3579 3580 if (optlen != sizeof(val)) 3581 return -EINVAL; 3582 if (copy_from_user(&val, optval, sizeof(val))) 3583 return -EFAULT; 3584 3585 pkt_sk(sk)->copy_thresh = val; 3586 return 0; 3587 } 3588 case PACKET_VERSION: 3589 { 3590 int val; 3591 3592 if (optlen != sizeof(val)) 3593 return -EINVAL; 3594 if (copy_from_user(&val, optval, sizeof(val))) 3595 return -EFAULT; 3596 switch (val) { 3597 case TPACKET_V1: 3598 case TPACKET_V2: 
3599 case TPACKET_V3: 3600 break; 3601 default: 3602 return -EINVAL; 3603 } 3604 lock_sock(sk); 3605 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3606 ret = -EBUSY; 3607 } else { 3608 po->tp_version = val; 3609 ret = 0; 3610 } 3611 release_sock(sk); 3612 return ret; 3613 } 3614 case PACKET_RESERVE: 3615 { 3616 unsigned int val; 3617 3618 if (optlen != sizeof(val)) 3619 return -EINVAL; 3620 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3621 return -EBUSY; 3622 if (copy_from_user(&val, optval, sizeof(val))) 3623 return -EFAULT; 3624 po->tp_reserve = val; 3625 return 0; 3626 } 3627 case PACKET_LOSS: 3628 { 3629 unsigned int val; 3630 3631 if (optlen != sizeof(val)) 3632 return -EINVAL; 3633 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3634 return -EBUSY; 3635 if (copy_from_user(&val, optval, sizeof(val))) 3636 return -EFAULT; 3637 po->tp_loss = !!val; 3638 return 0; 3639 } 3640 case PACKET_AUXDATA: 3641 { 3642 int val; 3643 3644 if (optlen < sizeof(val)) 3645 return -EINVAL; 3646 if (copy_from_user(&val, optval, sizeof(val))) 3647 return -EFAULT; 3648 3649 po->auxdata = !!val; 3650 return 0; 3651 } 3652 case PACKET_ORIGDEV: 3653 { 3654 int val; 3655 3656 if (optlen < sizeof(val)) 3657 return -EINVAL; 3658 if (copy_from_user(&val, optval, sizeof(val))) 3659 return -EFAULT; 3660 3661 po->origdev = !!val; 3662 return 0; 3663 } 3664 case PACKET_VNET_HDR: 3665 { 3666 int val; 3667 3668 if (sock->type != SOCK_RAW) 3669 return -EINVAL; 3670 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3671 return -EBUSY; 3672 if (optlen < sizeof(val)) 3673 return -EINVAL; 3674 if (copy_from_user(&val, optval, sizeof(val))) 3675 return -EFAULT; 3676 3677 po->has_vnet_hdr = !!val; 3678 return 0; 3679 } 3680 case PACKET_TIMESTAMP: 3681 { 3682 int val; 3683 3684 if (optlen != sizeof(val)) 3685 return -EINVAL; 3686 if (copy_from_user(&val, optval, sizeof(val))) 3687 return -EFAULT; 3688 3689 po->tp_tstamp = val; 3690 return 0; 3691 } 3692 case PACKET_FANOUT: 3693 { 3694 int val; 3695 3696 if (optlen != sizeof(val)) 3697 return -EINVAL; 3698 if (copy_from_user(&val, optval, sizeof(val))) 3699 return -EFAULT; 3700 3701 return fanout_add(sk, val & 0xffff, val >> 16); 3702 } 3703 case PACKET_FANOUT_DATA: 3704 { 3705 if (!po->fanout) 3706 return -EINVAL; 3707 3708 return fanout_set_data(po, optval, optlen); 3709 } 3710 case PACKET_TX_HAS_OFF: 3711 { 3712 unsigned int val; 3713 3714 if (optlen != sizeof(val)) 3715 return -EINVAL; 3716 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3717 return -EBUSY; 3718 if (copy_from_user(&val, optval, sizeof(val))) 3719 return -EFAULT; 3720 po->tp_tx_has_off = !!val; 3721 return 0; 3722 } 3723 case PACKET_QDISC_BYPASS: 3724 { 3725 int val; 3726 3727 if (optlen != sizeof(val)) 3728 return -EINVAL; 3729 if (copy_from_user(&val, optval, sizeof(val))) 3730 return -EFAULT; 3731 3732 po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; 3733 return 0; 3734 } 3735 default: 3736 return -ENOPROTOOPT; 3737 } 3738 } 3739 3740 static int packet_getsockopt(struct socket *sock, int level, int optname, 3741 char __user *optval, int __user *optlen) 3742 { 3743 int len; 3744 int val, lv = sizeof(val); 3745 struct sock *sk = sock->sk; 3746 struct packet_sock *po = pkt_sk(sk); 3747 void *data = &val; 3748 union tpacket_stats_u st; 3749 struct tpacket_rollover_stats rstats; 3750 3751 if (level != SOL_PACKET) 3752 return -ENOPROTOOPT; 3753 3754 if (get_user(len, optlen)) 3755 return -EFAULT; 3756 3757 if (len < 0) 3758 return -EINVAL; 3759 3760 switch (optname) { 3761 case PACKET_STATISTICS: 3762 spin_lock_bh(&sk->sk_receive_queue.lock); 3763 memcpy(&st, &po->stats, sizeof(st)); 3764 memset(&po->stats, 0, sizeof(po->stats)); 3765 spin_unlock_bh(&sk->sk_receive_queue.lock); 3766 3767 if (po->tp_version == TPACKET_V3) { 3768 lv = sizeof(struct tpacket_stats_v3); 3769 st.stats3.tp_packets += st.stats3.tp_drops; 3770 data = &st.stats3; 3771 } else { 3772 lv = sizeof(struct tpacket_stats); 3773 st.stats1.tp_packets += st.stats1.tp_drops; 3774 data = &st.stats1; 3775 } 3776 3777 break; 3778 case PACKET_AUXDATA: 3779 val = po->auxdata; 3780 break; 3781 case PACKET_ORIGDEV: 3782 val = po->origdev; 3783 break; 3784 case PACKET_VNET_HDR: 3785 val = po->has_vnet_hdr; 3786 break; 3787 case PACKET_VERSION: 3788 val = po->tp_version; 3789 break; 3790 case PACKET_HDRLEN: 3791 if (len > sizeof(int)) 3792 len = sizeof(int); 3793 if (copy_from_user(&val, optval, len)) 3794 return -EFAULT; 3795 switch (val) { 3796 case TPACKET_V1: 3797 val = sizeof(struct tpacket_hdr); 3798 break; 3799 case TPACKET_V2: 3800 val = sizeof(struct tpacket2_hdr); 3801 break; 3802 case TPACKET_V3: 3803 val = sizeof(struct tpacket3_hdr); 3804 break; 3805 default: 3806 return -EINVAL; 3807 } 3808 break; 3809 case PACKET_RESERVE: 3810 val = po->tp_reserve; 3811 break; 3812 case PACKET_LOSS: 3813 val = po->tp_loss; 3814 break; 3815 case PACKET_TIMESTAMP: 3816 val = po->tp_tstamp; 3817 break; 3818 case PACKET_FANOUT: 3819 val = (po->fanout ? 
3820 ((u32)po->fanout->id | 3821 ((u32)po->fanout->type << 16) | 3822 ((u32)po->fanout->flags << 24)) : 3823 0); 3824 break; 3825 case PACKET_ROLLOVER_STATS: 3826 if (!po->rollover) 3827 return -EINVAL; 3828 rstats.tp_all = atomic_long_read(&po->rollover->num); 3829 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 3830 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 3831 data = &rstats; 3832 lv = sizeof(rstats); 3833 break; 3834 case PACKET_TX_HAS_OFF: 3835 val = po->tp_tx_has_off; 3836 break; 3837 case PACKET_QDISC_BYPASS: 3838 val = packet_use_direct_xmit(po); 3839 break; 3840 default: 3841 return -ENOPROTOOPT; 3842 } 3843 3844 if (len > lv) 3845 len = lv; 3846 if (put_user(len, optlen)) 3847 return -EFAULT; 3848 if (copy_to_user(optval, data, len)) 3849 return -EFAULT; 3850 return 0; 3851 } 3852 3853 3854 #ifdef CONFIG_COMPAT 3855 static int compat_packet_setsockopt(struct socket *sock, int level, int optname, 3856 char __user *optval, unsigned int optlen) 3857 { 3858 struct packet_sock *po = pkt_sk(sock->sk); 3859 3860 if (level != SOL_PACKET) 3861 return -ENOPROTOOPT; 3862 3863 if (optname == PACKET_FANOUT_DATA && 3864 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { 3865 optval = (char __user *)get_compat_bpf_fprog(optval); 3866 if (!optval) 3867 return -EFAULT; 3868 optlen = sizeof(struct sock_fprog); 3869 } 3870 3871 return packet_setsockopt(sock, level, optname, optval, optlen); 3872 } 3873 #endif 3874 3875 static int packet_notifier(struct notifier_block *this, 3876 unsigned long msg, void *ptr) 3877 { 3878 struct sock *sk; 3879 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3880 struct net *net = dev_net(dev); 3881 3882 rcu_read_lock(); 3883 sk_for_each_rcu(sk, &net->packet.sklist) { 3884 struct packet_sock *po = pkt_sk(sk); 3885 3886 switch (msg) { 3887 case NETDEV_UNREGISTER: 3888 if (po->mclist) 3889 packet_dev_mclist_delete(dev, &po->mclist); 3890 /* fallthrough */ 3891 3892 case NETDEV_DOWN: 3893 if (dev->ifindex == po->ifindex) { 3894 spin_lock(&po->bind_lock); 3895 if (po->running) { 3896 __unregister_prot_hook(sk, false); 3897 sk->sk_err = ENETDOWN; 3898 if (!sock_flag(sk, SOCK_DEAD)) 3899 sk->sk_error_report(sk); 3900 } 3901 if (msg == NETDEV_UNREGISTER) { 3902 packet_cached_dev_reset(po); 3903 fanout_release(sk); 3904 po->ifindex = -1; 3905 if (po->prot_hook.dev) 3906 dev_put(po->prot_hook.dev); 3907 po->prot_hook.dev = NULL; 3908 } 3909 spin_unlock(&po->bind_lock); 3910 } 3911 break; 3912 case NETDEV_UP: 3913 if (dev->ifindex == po->ifindex) { 3914 spin_lock(&po->bind_lock); 3915 if (po->num) 3916 register_prot_hook(sk); 3917 spin_unlock(&po->bind_lock); 3918 } 3919 break; 3920 } 3921 } 3922 rcu_read_unlock(); 3923 return NOTIFY_DONE; 3924 } 3925 3926 3927 static int packet_ioctl(struct socket *sock, unsigned int cmd, 3928 unsigned long arg) 3929 { 3930 struct sock *sk = sock->sk; 3931 3932 switch (cmd) { 3933 case SIOCOUTQ: 3934 { 3935 int amount = sk_wmem_alloc_get(sk); 3936 3937 return put_user(amount, (int __user *)arg); 3938 } 3939 case SIOCINQ: 3940 { 3941 struct sk_buff *skb; 3942 int amount = 0; 3943 3944 spin_lock_bh(&sk->sk_receive_queue.lock); 3945 skb = skb_peek(&sk->sk_receive_queue); 3946 if (skb) 3947 amount = skb->len; 3948 spin_unlock_bh(&sk->sk_receive_queue.lock); 3949 return put_user(amount, (int __user *)arg); 3950 } 3951 case SIOCGSTAMP: 3952 return sock_get_timestamp(sk, (struct timeval __user *)arg); 3953 case SIOCGSTAMPNS: 3954 return sock_get_timestampns(sk, (struct timespec __user *)arg); 3955 
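	/* The interface and routing ioctls below are not packet-specific;
	 * hand them to the inet layer when it is built in.
	 */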
3956 #ifdef CONFIG_INET 3957 case SIOCADDRT: 3958 case SIOCDELRT: 3959 case SIOCDARP: 3960 case SIOCGARP: 3961 case SIOCSARP: 3962 case SIOCGIFADDR: 3963 case SIOCSIFADDR: 3964 case SIOCGIFBRDADDR: 3965 case SIOCSIFBRDADDR: 3966 case SIOCGIFNETMASK: 3967 case SIOCSIFNETMASK: 3968 case SIOCGIFDSTADDR: 3969 case SIOCSIFDSTADDR: 3970 case SIOCSIFFLAGS: 3971 return inet_dgram_ops.ioctl(sock, cmd, arg); 3972 #endif 3973 3974 default: 3975 return -ENOIOCTLCMD; 3976 } 3977 return 0; 3978 } 3979 3980 static unsigned int packet_poll(struct file *file, struct socket *sock, 3981 poll_table *wait) 3982 { 3983 struct sock *sk = sock->sk; 3984 struct packet_sock *po = pkt_sk(sk); 3985 unsigned int mask = datagram_poll(file, sock, wait); 3986 3987 spin_lock_bh(&sk->sk_receive_queue.lock); 3988 if (po->rx_ring.pg_vec) { 3989 if (!packet_previous_rx_frame(po, &po->rx_ring, 3990 TP_STATUS_KERNEL)) 3991 mask |= POLLIN | POLLRDNORM; 3992 } 3993 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 3994 po->pressure = 0; 3995 spin_unlock_bh(&sk->sk_receive_queue.lock); 3996 spin_lock_bh(&sk->sk_write_queue.lock); 3997 if (po->tx_ring.pg_vec) { 3998 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 3999 mask |= POLLOUT | POLLWRNORM; 4000 } 4001 spin_unlock_bh(&sk->sk_write_queue.lock); 4002 return mask; 4003 } 4004 4005 4006 /* Dirty? Well, I still did not learn better way to account 4007 * for user mmaps. 4008 */ 4009 4010 static void packet_mm_open(struct vm_area_struct *vma) 4011 { 4012 struct file *file = vma->vm_file; 4013 struct socket *sock = file->private_data; 4014 struct sock *sk = sock->sk; 4015 4016 if (sk) 4017 atomic_inc(&pkt_sk(sk)->mapped); 4018 } 4019 4020 static void packet_mm_close(struct vm_area_struct *vma) 4021 { 4022 struct file *file = vma->vm_file; 4023 struct socket *sock = file->private_data; 4024 struct sock *sk = sock->sk; 4025 4026 if (sk) 4027 atomic_dec(&pkt_sk(sk)->mapped); 4028 } 4029 4030 static const struct vm_operations_struct packet_mmap_ops = { 4031 .open = packet_mm_open, 4032 .close = packet_mm_close, 4033 }; 4034 4035 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 4036 unsigned int len) 4037 { 4038 int i; 4039 4040 for (i = 0; i < len; i++) { 4041 if (likely(pg_vec[i].buffer)) { 4042 if (is_vmalloc_addr(pg_vec[i].buffer)) 4043 vfree(pg_vec[i].buffer); 4044 else 4045 free_pages((unsigned long)pg_vec[i].buffer, 4046 order); 4047 pg_vec[i].buffer = NULL; 4048 } 4049 } 4050 kfree(pg_vec); 4051 } 4052 4053 static char *alloc_one_pg_vec_page(unsigned long order) 4054 { 4055 char *buffer; 4056 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 4057 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 4058 4059 buffer = (char *) __get_free_pages(gfp_flags, order); 4060 if (buffer) 4061 return buffer; 4062 4063 /* __get_free_pages failed, fall back to vmalloc */ 4064 buffer = vzalloc((1 << order) * PAGE_SIZE); 4065 if (buffer) 4066 return buffer; 4067 4068 /* vmalloc failed, lets dig into swap here */ 4069 gfp_flags &= ~__GFP_NORETRY; 4070 buffer = (char *) __get_free_pages(gfp_flags, order); 4071 if (buffer) 4072 return buffer; 4073 4074 /* complete and utter failure */ 4075 return NULL; 4076 } 4077 4078 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 4079 { 4080 unsigned int block_nr = req->tp_block_nr; 4081 struct pgv *pg_vec; 4082 int i; 4083 4084 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); 4085 if (unlikely(!pg_vec)) 4086 goto out; 4087 4088 for (i = 0; i < block_nr; i++) { 4089 pg_vec[i].buffer = 
alloc_one_pg_vec_page(order); 4090 if (unlikely(!pg_vec[i].buffer)) 4091 goto out_free_pgvec; 4092 } 4093 4094 out: 4095 return pg_vec; 4096 4097 out_free_pgvec: 4098 free_pg_vec(pg_vec, order, block_nr); 4099 pg_vec = NULL; 4100 goto out; 4101 } 4102 4103 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4104 int closing, int tx_ring) 4105 { 4106 struct pgv *pg_vec = NULL; 4107 struct packet_sock *po = pkt_sk(sk); 4108 int was_running, order = 0; 4109 struct packet_ring_buffer *rb; 4110 struct sk_buff_head *rb_queue; 4111 __be16 num; 4112 int err = -EINVAL; 4113 /* Added to avoid minimal code churn */ 4114 struct tpacket_req *req = &req_u->req; 4115 4116 lock_sock(sk); 4117 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4118 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4119 net_warn_ratelimited("Tx-ring is not supported.\n"); 4120 goto out; 4121 } 4122 4123 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4124 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4125 4126 err = -EBUSY; 4127 if (!closing) { 4128 if (atomic_read(&po->mapped)) 4129 goto out; 4130 if (packet_read_pending(rb)) 4131 goto out; 4132 } 4133 4134 if (req->tp_block_nr) { 4135 /* Sanity tests and some calculations */ 4136 err = -EBUSY; 4137 if (unlikely(rb->pg_vec)) 4138 goto out; 4139 4140 switch (po->tp_version) { 4141 case TPACKET_V1: 4142 po->tp_hdrlen = TPACKET_HDRLEN; 4143 break; 4144 case TPACKET_V2: 4145 po->tp_hdrlen = TPACKET2_HDRLEN; 4146 break; 4147 case TPACKET_V3: 4148 po->tp_hdrlen = TPACKET3_HDRLEN; 4149 break; 4150 } 4151 4152 err = -EINVAL; 4153 if (unlikely((int)req->tp_block_size <= 0)) 4154 goto out; 4155 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4156 goto out; 4157 if (po->tp_version >= TPACKET_V3 && 4158 (int)(req->tp_block_size - 4159 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 4160 goto out; 4161 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4162 po->tp_reserve)) 4163 goto out; 4164 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4165 goto out; 4166 4167 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4168 if (unlikely(rb->frames_per_block == 0)) 4169 goto out; 4170 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4171 req->tp_frame_nr)) 4172 goto out; 4173 4174 err = -ENOMEM; 4175 order = get_order(req->tp_block_size); 4176 pg_vec = alloc_pg_vec(req, order); 4177 if (unlikely(!pg_vec)) 4178 goto out; 4179 switch (po->tp_version) { 4180 case TPACKET_V3: 4181 /* Transmit path is not supported. 
We checked 4182 * it above but just being paranoid 4183 */ 4184 if (!tx_ring) 4185 init_prb_bdqc(po, rb, pg_vec, req_u); 4186 break; 4187 default: 4188 break; 4189 } 4190 } 4191 /* Done */ 4192 else { 4193 err = -EINVAL; 4194 if (unlikely(req->tp_frame_nr)) 4195 goto out; 4196 } 4197 4198 4199 /* Detach socket from network */ 4200 spin_lock(&po->bind_lock); 4201 was_running = po->running; 4202 num = po->num; 4203 if (was_running) { 4204 po->num = 0; 4205 __unregister_prot_hook(sk, false); 4206 } 4207 spin_unlock(&po->bind_lock); 4208 4209 synchronize_net(); 4210 4211 err = -EBUSY; 4212 mutex_lock(&po->pg_vec_lock); 4213 if (closing || atomic_read(&po->mapped) == 0) { 4214 err = 0; 4215 spin_lock_bh(&rb_queue->lock); 4216 swap(rb->pg_vec, pg_vec); 4217 rb->frame_max = (req->tp_frame_nr - 1); 4218 rb->head = 0; 4219 rb->frame_size = req->tp_frame_size; 4220 spin_unlock_bh(&rb_queue->lock); 4221 4222 swap(rb->pg_vec_order, order); 4223 swap(rb->pg_vec_len, req->tp_block_nr); 4224 4225 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 4226 po->prot_hook.func = (po->rx_ring.pg_vec) ? 4227 tpacket_rcv : packet_rcv; 4228 skb_queue_purge(rb_queue); 4229 if (atomic_read(&po->mapped)) 4230 pr_err("packet_mmap: vma is busy: %d\n", 4231 atomic_read(&po->mapped)); 4232 } 4233 mutex_unlock(&po->pg_vec_lock); 4234 4235 spin_lock(&po->bind_lock); 4236 if (was_running) { 4237 po->num = num; 4238 register_prot_hook(sk); 4239 } 4240 spin_unlock(&po->bind_lock); 4241 if (closing && (po->tp_version > TPACKET_V2)) { 4242 /* Because we don't support block-based V3 on tx-ring */ 4243 if (!tx_ring) 4244 prb_shutdown_retire_blk_timer(po, rb_queue); 4245 } 4246 4247 if (pg_vec) 4248 free_pg_vec(pg_vec, order, req->tp_block_nr); 4249 out: 4250 release_sock(sk); 4251 return err; 4252 } 4253 4254 static int packet_mmap(struct file *file, struct socket *sock, 4255 struct vm_area_struct *vma) 4256 { 4257 struct sock *sk = sock->sk; 4258 struct packet_sock *po = pkt_sk(sk); 4259 unsigned long size, expected_size; 4260 struct packet_ring_buffer *rb; 4261 unsigned long start; 4262 int err = -EINVAL; 4263 int i; 4264 4265 if (vma->vm_pgoff) 4266 return -EINVAL; 4267 4268 mutex_lock(&po->pg_vec_lock); 4269 4270 expected_size = 0; 4271 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 4272 if (rb->pg_vec) { 4273 expected_size += rb->pg_vec_len 4274 * rb->pg_vec_pages 4275 * PAGE_SIZE; 4276 } 4277 } 4278 4279 if (expected_size == 0) 4280 goto out; 4281 4282 size = vma->vm_end - vma->vm_start; 4283 if (size != expected_size) 4284 goto out; 4285 4286 start = vma->vm_start; 4287 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 4288 if (rb->pg_vec == NULL) 4289 continue; 4290 4291 for (i = 0; i < rb->pg_vec_len; i++) { 4292 struct page *page; 4293 void *kaddr = rb->pg_vec[i].buffer; 4294 int pg_num; 4295 4296 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { 4297 page = pgv_to_page(kaddr); 4298 err = vm_insert_page(vma, start, page); 4299 if (unlikely(err)) 4300 goto out; 4301 start += PAGE_SIZE; 4302 kaddr += PAGE_SIZE; 4303 } 4304 } 4305 } 4306 4307 atomic_inc(&po->mapped); 4308 vma->vm_ops = &packet_mmap_ops; 4309 err = 0; 4310 4311 out: 4312 mutex_unlock(&po->pg_vec_lock); 4313 return err; 4314 } 4315 4316 static const struct proto_ops packet_ops_spkt = { 4317 .family = PF_PACKET, 4318 .owner = THIS_MODULE, 4319 .release = packet_release, 4320 .bind = packet_bind_spkt, 4321 .connect = sock_no_connect, 4322 .socketpair = sock_no_socketpair, 4323 .accept = sock_no_accept, 4324 .getname = packet_getname_spkt, 4325 .poll 
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
}
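/* Illustration only, not part of this module: packet_seq_show() above emits
 * one line per packet socket into the per-namespace /proc/net/packet file
 * created by packet_net_init(), with the columns named by the header line
 * (sk, RefCnt, Type, Proto in hex, Iface, R, Rmem, User, Inode). Under those
 * assumptions, a minimal userspace reader could look like the hypothetical
 * sketch below; the file path comes from this file, the rest is made up.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/packet", "r");
 *
 *		if (!f)
 *			return 1;
 *		// first line is the header, then one line per packet socket
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */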
static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
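/* For reference, the ring setup exercised by packet_set_ring() and
 * packet_mmap() earlier in this file is driven from userspace roughly as
 * sketched below. This is an illustrative sketch only, not part of the
 * module: the block/frame sizes are arbitrary example values, error handling
 * is omitted, and TPACKET_V2 is assumed so the ring is described by a plain
 * struct tpacket_req.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/mman.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	int setup_rx_ring(void)
 *	{
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		int ver = TPACKET_V2;
 *		struct tpacket_req req;
 *		void *ring;
 *
 *		setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *		memset(&req, 0, sizeof(req));
 *		req.tp_block_size = 4096;	// example: one page per block
 *		req.tp_block_nr   = 64;
 *		req.tp_frame_size = 2048;	// must divide tp_block_size
 *		req.tp_frame_nr   = 64 * 2;	// blocks * frames per block
 *		setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *		// The mapping starts at offset 0 and covers the rx ring (and
 *		// the tx ring, if one were configured) exactly, as
 *		// packet_mmap() requires.
 *		ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		return ring == MAP_FAILED ? -1 : fd;
 *	}
 */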