1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * PACKET - implements raw packet sockets. 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org> 11 * 12 * Fixes: 13 * Alan Cox : verify_area() now used correctly 14 * Alan Cox : new skbuff lists, look ma no backlogs! 15 * Alan Cox : tidied skbuff lists. 16 * Alan Cox : Now uses generic datagram routines I 17 * added. Also fixed the peek/read crash 18 * from all old Linux datagram code. 19 * Alan Cox : Uses the improved datagram code. 20 * Alan Cox : Added NULL's for socket options. 21 * Alan Cox : Re-commented the code. 22 * Alan Cox : Use new kernel side addressing 23 * Rob Janssen : Correct MTU usage. 24 * Dave Platt : Counter leaks caused by incorrect 25 * interrupt locking and some slightly 26 * dubious gcc output. Can you read 27 * compiler: it said _VOLATILE_ 28 * Richard Kooijman : Timestamp fixes. 29 * Alan Cox : New buffers. Use sk->mac.raw. 30 * Alan Cox : sendmsg/recvmsg support. 31 * Alan Cox : Protocol setting support 32 * Alexey Kuznetsov : Untied from IPv4 stack. 33 * Cyrus Durgin : Fixed kerneld for kmod. 34 * Michal Ostrowski : Module initialization cleanup. 35 * Ulises Alonso : Frame number limit removal and 36 * packet_set_ring memory leak. 37 * Eric Biederman : Allow for > 8 byte hardware addresses. 38 * The convention is that longer addresses 39 * will simply extend the hardware address 40 * byte arrays at the end of sockaddr_ll 41 * and packet_mreq. 42 * Johann Baudy : Added TX RING. 43 * Chetan Loke : Implemented TPACKET_V3 block abstraction 44 * layer. 45 * Copyright (C) 2011, <lokec@ccs.neu.edu> 46 * 47 * 48 * This program is free software; you can redistribute it and/or 49 * modify it under the terms of the GNU General Public License 50 * as published by the Free Software Foundation; either version 51 * 2 of the License, or (at your option) any later version. 52 * 53 */ 54 55 #include <linux/types.h> 56 #include <linux/mm.h> 57 #include <linux/capability.h> 58 #include <linux/fcntl.h> 59 #include <linux/socket.h> 60 #include <linux/in.h> 61 #include <linux/inet.h> 62 #include <linux/netdevice.h> 63 #include <linux/if_packet.h> 64 #include <linux/wireless.h> 65 #include <linux/kernel.h> 66 #include <linux/kmod.h> 67 #include <linux/slab.h> 68 #include <linux/vmalloc.h> 69 #include <net/net_namespace.h> 70 #include <net/ip.h> 71 #include <net/protocol.h> 72 #include <linux/skbuff.h> 73 #include <net/sock.h> 74 #include <linux/errno.h> 75 #include <linux/timer.h> 76 #include <linux/uaccess.h> 77 #include <asm/ioctls.h> 78 #include <asm/page.h> 79 #include <asm/cacheflush.h> 80 #include <asm/io.h> 81 #include <linux/proc_fs.h> 82 #include <linux/seq_file.h> 83 #include <linux/poll.h> 84 #include <linux/module.h> 85 #include <linux/init.h> 86 #include <linux/mutex.h> 87 #include <linux/if_vlan.h> 88 #include <linux/virtio_net.h> 89 #include <linux/errqueue.h> 90 #include <linux/net_tstamp.h> 91 #include <linux/percpu.h> 92 #ifdef CONFIG_INET 93 #include <net/inet_common.h> 94 #endif 95 #include <linux/bpf.h> 96 #include <net/compat.h> 97 98 #include "internal.h" 99 100 /* 101 Assumptions: 102 - if device has no dev->hard_header routine, it adds and removes ll header 103 inside itself. 
In this case ll header is invisible outside of device,
   but higher levels should still reserve dev->hard_header_len.
   Some devices are clever enough to reallocate the skb when the header
   will not fit in the reserved space (tunnel); others are silly
   (PPP).
 - packet socket receives packets with pulled ll header,
   so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
                 ll header. PPP does this, which is wrong because it
                 introduces asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

We should set nh.raw on output to the correct position;
the packet classifier depends on it.
*/

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void
prb_clear_rxhash(struct tpacket_kbdq_core *, 208 struct tpacket3_hdr *); 209 static void prb_fill_vlan_info(struct tpacket_kbdq_core *, 210 struct tpacket3_hdr *); 211 static void packet_flush_mclist(struct sock *sk); 212 static u16 packet_pick_tx_queue(struct sk_buff *skb); 213 214 struct packet_skb_cb { 215 union { 216 struct sockaddr_pkt pkt; 217 union { 218 /* Trick: alias skb original length with 219 * ll.sll_family and ll.protocol in order 220 * to save room. 221 */ 222 unsigned int origlen; 223 struct sockaddr_ll ll; 224 }; 225 } sa; 226 }; 227 228 #define vio_le() virtio_legacy_is_little_endian() 229 230 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 231 232 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 233 #define GET_PBLOCK_DESC(x, bid) \ 234 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 235 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 236 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 237 #define GET_NEXT_PRB_BLK_NUM(x) \ 238 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 239 ((x)->kactive_blk_num+1) : 0) 240 241 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 242 static void __fanout_link(struct sock *sk, struct packet_sock *po); 243 244 static int packet_direct_xmit(struct sk_buff *skb) 245 { 246 return dev_direct_xmit(skb, packet_pick_tx_queue(skb)); 247 } 248 249 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 250 { 251 struct net_device *dev; 252 253 rcu_read_lock(); 254 dev = rcu_dereference(po->cached_dev); 255 if (likely(dev)) 256 dev_hold(dev); 257 rcu_read_unlock(); 258 259 return dev; 260 } 261 262 static void packet_cached_dev_assign(struct packet_sock *po, 263 struct net_device *dev) 264 { 265 rcu_assign_pointer(po->cached_dev, dev); 266 } 267 268 static void packet_cached_dev_reset(struct packet_sock *po) 269 { 270 RCU_INIT_POINTER(po->cached_dev, NULL); 271 } 272 273 static bool packet_use_direct_xmit(const struct packet_sock *po) 274 { 275 return po->xmit == packet_direct_xmit; 276 } 277 278 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb, 279 struct net_device *sb_dev) 280 { 281 return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL); 282 } 283 284 static u16 packet_pick_tx_queue(struct sk_buff *skb) 285 { 286 struct net_device *dev = skb->dev; 287 const struct net_device_ops *ops = dev->netdev_ops; 288 u16 queue_index; 289 290 if (ops->ndo_select_queue) { 291 queue_index = ops->ndo_select_queue(dev, skb, NULL, 292 __packet_pick_tx_queue); 293 queue_index = netdev_cap_txqueue(dev, queue_index); 294 } else { 295 queue_index = __packet_pick_tx_queue(dev, skb, NULL); 296 } 297 298 return queue_index; 299 } 300 301 /* __register_prot_hook must be invoked through register_prot_hook 302 * or from a context in which asynchronous accesses to the packet 303 * socket is not possible (packet_create()). 
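 * register_prot_hook() below enforces this by asserting that
 * po->bind_lock is held; that lock is what serializes updates to
 * po->running and po->prot_hook against the bind/setsockopt paths.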
304 */ 305 static void __register_prot_hook(struct sock *sk) 306 { 307 struct packet_sock *po = pkt_sk(sk); 308 309 if (!po->running) { 310 if (po->fanout) 311 __fanout_link(sk, po); 312 else 313 dev_add_pack(&po->prot_hook); 314 315 sock_hold(sk); 316 po->running = 1; 317 } 318 } 319 320 static void register_prot_hook(struct sock *sk) 321 { 322 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); 323 __register_prot_hook(sk); 324 } 325 326 /* If the sync parameter is true, we will temporarily drop 327 * the po->bind_lock and do a synchronize_net to make sure no 328 * asynchronous packet processing paths still refer to the elements 329 * of po->prot_hook. If the sync parameter is false, it is the 330 * callers responsibility to take care of this. 331 */ 332 static void __unregister_prot_hook(struct sock *sk, bool sync) 333 { 334 struct packet_sock *po = pkt_sk(sk); 335 336 lockdep_assert_held_once(&po->bind_lock); 337 338 po->running = 0; 339 340 if (po->fanout) 341 __fanout_unlink(sk, po); 342 else 343 __dev_remove_pack(&po->prot_hook); 344 345 __sock_put(sk); 346 347 if (sync) { 348 spin_unlock(&po->bind_lock); 349 synchronize_net(); 350 spin_lock(&po->bind_lock); 351 } 352 } 353 354 static void unregister_prot_hook(struct sock *sk, bool sync) 355 { 356 struct packet_sock *po = pkt_sk(sk); 357 358 if (po->running) 359 __unregister_prot_hook(sk, sync); 360 } 361 362 static inline struct page * __pure pgv_to_page(void *addr) 363 { 364 if (is_vmalloc_addr(addr)) 365 return vmalloc_to_page(addr); 366 return virt_to_page(addr); 367 } 368 369 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 370 { 371 union tpacket_uhdr h; 372 373 h.raw = frame; 374 switch (po->tp_version) { 375 case TPACKET_V1: 376 h.h1->tp_status = status; 377 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 378 break; 379 case TPACKET_V2: 380 h.h2->tp_status = status; 381 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 382 break; 383 case TPACKET_V3: 384 h.h3->tp_status = status; 385 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 386 break; 387 default: 388 WARN(1, "TPACKET version not supported.\n"); 389 BUG(); 390 } 391 392 smp_wmb(); 393 } 394 395 static int __packet_get_status(struct packet_sock *po, void *frame) 396 { 397 union tpacket_uhdr h; 398 399 smp_rmb(); 400 401 h.raw = frame; 402 switch (po->tp_version) { 403 case TPACKET_V1: 404 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 405 return h.h1->tp_status; 406 case TPACKET_V2: 407 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 408 return h.h2->tp_status; 409 case TPACKET_V3: 410 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 411 return h.h3->tp_status; 412 default: 413 WARN(1, "TPACKET version not supported.\n"); 414 BUG(); 415 return 0; 416 } 417 } 418 419 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, 420 unsigned int flags) 421 { 422 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 423 424 if (shhwtstamps && 425 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 426 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) 427 return TP_STATUS_TS_RAW_HARDWARE; 428 429 if (ktime_to_timespec_cond(skb->tstamp, ts)) 430 return TP_STATUS_TS_SOFTWARE; 431 432 return 0; 433 } 434 435 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 436 struct sk_buff *skb) 437 { 438 union tpacket_uhdr h; 439 struct timespec ts; 440 __u32 ts_status; 441 442 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 443 return 0; 444 445 h.raw = frame; 446 switch (po->tp_version) { 
447 case TPACKET_V1: 448 h.h1->tp_sec = ts.tv_sec; 449 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 450 break; 451 case TPACKET_V2: 452 h.h2->tp_sec = ts.tv_sec; 453 h.h2->tp_nsec = ts.tv_nsec; 454 break; 455 case TPACKET_V3: 456 h.h3->tp_sec = ts.tv_sec; 457 h.h3->tp_nsec = ts.tv_nsec; 458 break; 459 default: 460 WARN(1, "TPACKET version not supported.\n"); 461 BUG(); 462 } 463 464 /* one flush is safe, as both fields always lie on the same cacheline */ 465 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 466 smp_wmb(); 467 468 return ts_status; 469 } 470 471 static void *packet_lookup_frame(struct packet_sock *po, 472 struct packet_ring_buffer *rb, 473 unsigned int position, 474 int status) 475 { 476 unsigned int pg_vec_pos, frame_offset; 477 union tpacket_uhdr h; 478 479 pg_vec_pos = position / rb->frames_per_block; 480 frame_offset = position % rb->frames_per_block; 481 482 h.raw = rb->pg_vec[pg_vec_pos].buffer + 483 (frame_offset * rb->frame_size); 484 485 if (status != __packet_get_status(po, h.raw)) 486 return NULL; 487 488 return h.raw; 489 } 490 491 static void *packet_current_frame(struct packet_sock *po, 492 struct packet_ring_buffer *rb, 493 int status) 494 { 495 return packet_lookup_frame(po, rb, rb->head, status); 496 } 497 498 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 499 { 500 del_timer_sync(&pkc->retire_blk_timer); 501 } 502 503 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 504 struct sk_buff_head *rb_queue) 505 { 506 struct tpacket_kbdq_core *pkc; 507 508 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 509 510 spin_lock_bh(&rb_queue->lock); 511 pkc->delete_blk_timer = 1; 512 spin_unlock_bh(&rb_queue->lock); 513 514 prb_del_retire_blk_timer(pkc); 515 } 516 517 static void prb_setup_retire_blk_timer(struct packet_sock *po) 518 { 519 struct tpacket_kbdq_core *pkc; 520 521 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 522 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired, 523 0); 524 pkc->retire_blk_timer.expires = jiffies; 525 } 526 527 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 528 int blk_size_in_bytes) 529 { 530 struct net_device *dev; 531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; 532 struct ethtool_link_ksettings ecmd; 533 int err; 534 535 rtnl_lock(); 536 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 537 if (unlikely(!dev)) { 538 rtnl_unlock(); 539 return DEFAULT_PRB_RETIRE_TOV; 540 } 541 err = __ethtool_get_link_ksettings(dev, &ecmd); 542 rtnl_unlock(); 543 if (!err) { 544 /* 545 * If the link speed is so slow you don't really 546 * need to worry about perf anyways 547 */ 548 if (ecmd.base.speed < SPEED_1000 || 549 ecmd.base.speed == SPEED_UNKNOWN) { 550 return DEFAULT_PRB_RETIRE_TOV; 551 } else { 552 msec = 1; 553 div = ecmd.base.speed / 1000; 554 } 555 } 556 557 mbits = (blk_size_in_bytes * 8) / (1024 * 1024); 558 559 if (div) 560 mbits /= div; 561 562 tmo = mbits * msec; 563 564 if (div) 565 return tmo+1; 566 return tmo; 567 } 568 569 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 570 union tpacket_req_u *req_u) 571 { 572 p1->feature_req_word = req_u->req3.tp_feature_req_word; 573 } 574 575 static void init_prb_bdqc(struct packet_sock *po, 576 struct packet_ring_buffer *rb, 577 struct pgv *pg_vec, 578 union tpacket_req_u *req_u) 579 { 580 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 581 struct tpacket_block_desc *pbd; 582 583 memset(p1, 0x0, sizeof(*p1)); 584 585 p1->knxt_seq_num = 1; 586 p1->pkbdq = pg_vec; 587 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 
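	/* pbd now points at the descriptor of block 0; prb_open_block()
	 * at the end of this function makes it the first active block.
	 */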
588 p1->pkblk_start = pg_vec[0].buffer; 589 p1->kblk_size = req_u->req3.tp_block_size; 590 p1->knum_blocks = req_u->req3.tp_block_nr; 591 p1->hdrlen = po->tp_hdrlen; 592 p1->version = po->tp_version; 593 p1->last_kactive_blk_num = 0; 594 po->stats.stats3.tp_freeze_q_cnt = 0; 595 if (req_u->req3.tp_retire_blk_tov) 596 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; 597 else 598 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, 599 req_u->req3.tp_block_size); 600 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); 601 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 602 603 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 604 prb_init_ft_ops(p1, req_u); 605 prb_setup_retire_blk_timer(po); 606 prb_open_block(p1, pbd); 607 } 608 609 /* Do NOT update the last_blk_num first. 610 * Assumes sk_buff_head lock is held. 611 */ 612 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) 613 { 614 mod_timer(&pkc->retire_blk_timer, 615 jiffies + pkc->tov_in_jiffies); 616 pkc->last_kactive_blk_num = pkc->kactive_blk_num; 617 } 618 619 /* 620 * Timer logic: 621 * 1) We refresh the timer only when we open a block. 622 * By doing this we don't waste cycles refreshing the timer 623 * on packet-by-packet basis. 624 * 625 * With a 1MB block-size, on a 1Gbps line, it will take 626 * i) ~8 ms to fill a block + ii) memcpy etc. 627 * In this cut we are not accounting for the memcpy time. 628 * 629 * So, if the user sets the 'tmo' to 10ms then the timer 630 * will never fire while the block is still getting filled 631 * (which is what we want). However, the user could choose 632 * to close a block early and that's fine. 633 * 634 * But when the timer does fire, we check whether or not to refresh it. 635 * Since the tmo granularity is in msecs, it is not too expensive 636 * to refresh the timer, lets say every '8' msecs. 637 * Either the user can set the 'tmo' or we can derive it based on 638 * a) line-speed and b) block-size. 639 * prb_calc_retire_blk_tmo() calculates the tmo. 640 * 641 */ 642 static void prb_retire_rx_blk_timer_expired(struct timer_list *t) 643 { 644 struct packet_sock *po = 645 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); 646 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 647 unsigned int frozen; 648 struct tpacket_block_desc *pbd; 649 650 spin_lock(&po->sk.sk_receive_queue.lock); 651 652 frozen = prb_queue_frozen(pkc); 653 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 654 655 if (unlikely(pkc->delete_blk_timer)) 656 goto out; 657 658 /* We only need to plug the race when the block is partially filled. 659 * tpacket_rcv: 660 * lock(); increment BLOCK_NUM_PKTS; unlock() 661 * copy_bits() is in progress ... 662 * timer fires on other cpu: 663 * we can't retire the current block because copy_bits 664 * is in progress. 665 * 666 */ 667 if (BLOCK_NUM_PKTS(pbd)) { 668 while (atomic_read(&pkc->blk_fill_in_prog)) { 669 /* Waiting for skb_copy_bits to finish... */ 670 cpu_relax(); 671 } 672 } 673 674 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 675 if (!frozen) { 676 if (!BLOCK_NUM_PKTS(pbd)) { 677 /* An empty block. Just refresh the timer. */ 678 goto refresh_timer; 679 } 680 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 681 if (!prb_dispatch_next_block(pkc, po)) 682 goto refresh_timer; 683 else 684 goto out; 685 } else { 686 /* Case 1. Queue was frozen because user-space was 687 * lagging behind. 688 */ 689 if (prb_curr_blk_in_use(pbd)) { 690 /* 691 * Ok, user-space is still behind. 
692 * So just refresh the timer. 693 */ 694 goto refresh_timer; 695 } else { 696 /* Case 2. queue was frozen,user-space caught up, 697 * now the link went idle && the timer fired. 698 * We don't have a block to close.So we open this 699 * block and restart the timer. 700 * opening a block thaws the queue,restarts timer 701 * Thawing/timer-refresh is a side effect. 702 */ 703 prb_open_block(pkc, pbd); 704 goto out; 705 } 706 } 707 } 708 709 refresh_timer: 710 _prb_refresh_rx_retire_blk_timer(pkc); 711 712 out: 713 spin_unlock(&po->sk.sk_receive_queue.lock); 714 } 715 716 static void prb_flush_block(struct tpacket_kbdq_core *pkc1, 717 struct tpacket_block_desc *pbd1, __u32 status) 718 { 719 /* Flush everything minus the block header */ 720 721 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 722 u8 *start, *end; 723 724 start = (u8 *)pbd1; 725 726 /* Skip the block header(we know header WILL fit in 4K) */ 727 start += PAGE_SIZE; 728 729 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); 730 for (; start < end; start += PAGE_SIZE) 731 flush_dcache_page(pgv_to_page(start)); 732 733 smp_wmb(); 734 #endif 735 736 /* Now update the block status. */ 737 738 BLOCK_STATUS(pbd1) = status; 739 740 /* Flush the block header */ 741 742 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 743 start = (u8 *)pbd1; 744 flush_dcache_page(pgv_to_page(start)); 745 746 smp_wmb(); 747 #endif 748 } 749 750 /* 751 * Side effect: 752 * 753 * 1) flush the block 754 * 2) Increment active_blk_num 755 * 756 * Note:We DONT refresh the timer on purpose. 757 * Because almost always the next block will be opened. 758 */ 759 static void prb_close_block(struct tpacket_kbdq_core *pkc1, 760 struct tpacket_block_desc *pbd1, 761 struct packet_sock *po, unsigned int stat) 762 { 763 __u32 status = TP_STATUS_USER | stat; 764 765 struct tpacket3_hdr *last_pkt; 766 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 767 struct sock *sk = &po->sk; 768 769 if (po->stats.stats3.tp_drops) 770 status |= TP_STATUS_LOSING; 771 772 last_pkt = (struct tpacket3_hdr *)pkc1->prev; 773 last_pkt->tp_next_offset = 0; 774 775 /* Get the ts of the last pkt */ 776 if (BLOCK_NUM_PKTS(pbd1)) { 777 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 778 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 779 } else { 780 /* Ok, we tmo'd - so get the current time. 781 * 782 * It shouldn't really happen as we don't close empty 783 * blocks. See prb_retire_rx_blk_timer_expired(). 784 */ 785 struct timespec ts; 786 getnstimeofday(&ts); 787 h1->ts_last_pkt.ts_sec = ts.tv_sec; 788 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; 789 } 790 791 smp_wmb(); 792 793 /* Flush the block */ 794 prb_flush_block(pkc1, pbd1, status); 795 796 sk->sk_data_ready(sk); 797 798 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 799 } 800 801 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) 802 { 803 pkc->reset_pending_on_curr_blk = 0; 804 } 805 806 /* 807 * Side effect of opening a block: 808 * 809 * 1) prb_queue is thawed. 810 * 2) retire_blk_timer is refreshed. 
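 *    (both are done at the end of prb_open_block() below)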
811 * 812 */ 813 static void prb_open_block(struct tpacket_kbdq_core *pkc1, 814 struct tpacket_block_desc *pbd1) 815 { 816 struct timespec ts; 817 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 818 819 smp_rmb(); 820 821 /* We could have just memset this but we will lose the 822 * flexibility of making the priv area sticky 823 */ 824 825 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; 826 BLOCK_NUM_PKTS(pbd1) = 0; 827 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 828 829 getnstimeofday(&ts); 830 831 h1->ts_first_pkt.ts_sec = ts.tv_sec; 832 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 833 834 pkc1->pkblk_start = (char *)pbd1; 835 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 836 837 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 838 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 839 840 pbd1->version = pkc1->version; 841 pkc1->prev = pkc1->nxt_offset; 842 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; 843 844 prb_thaw_queue(pkc1); 845 _prb_refresh_rx_retire_blk_timer(pkc1); 846 847 smp_wmb(); 848 } 849 850 /* 851 * Queue freeze logic: 852 * 1) Assume tp_block_nr = 8 blocks. 853 * 2) At time 't0', user opens Rx ring. 854 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 855 * 4) user-space is either sleeping or processing block '0'. 856 * 5) tpacket_rcv is currently filling block '7', since there is no space left, 857 * it will close block-7,loop around and try to fill block '0'. 858 * call-flow: 859 * __packet_lookup_frame_in_block 860 * prb_retire_current_block() 861 * prb_dispatch_next_block() 862 * |->(BLOCK_STATUS == USER) evaluates to true 863 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 864 * 6) Now there are two cases: 865 * 6.1) Link goes idle right after the queue is frozen. 866 * But remember, the last open_block() refreshed the timer. 867 * When this timer expires,it will refresh itself so that we can 868 * re-open block-0 in near future. 869 * 6.2) Link is busy and keeps on receiving packets. This is a simple 870 * case and __packet_lookup_frame_in_block will check if block-0 871 * is free and can now be re-used. 872 */ 873 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 874 struct packet_sock *po) 875 { 876 pkc->reset_pending_on_curr_blk = 1; 877 po->stats.stats3.tp_freeze_q_cnt++; 878 } 879 880 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 881 882 /* 883 * If the next block is free then we will dispatch it 884 * and return a good offset. 885 * Else, we will freeze the queue. 886 * So, caller must check the return value. 887 */ 888 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 889 struct packet_sock *po) 890 { 891 struct tpacket_block_desc *pbd; 892 893 smp_rmb(); 894 895 /* 1. Get current block num */ 896 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 897 898 /* 2. If this block is currently in_use then freeze the queue */ 899 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 900 prb_freeze_queue(pkc, po); 901 return NULL; 902 } 903 904 /* 905 * 3. 906 * open this block and return the offset where the first packet 907 * needs to get stored. 
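 *    (prb_open_block() advances pkc->nxt_offset past the block header and
 *    the private area, so the pointer returned here is the first frame slot)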
908 */ 909 prb_open_block(pkc, pbd); 910 return (void *)pkc->nxt_offset; 911 } 912 913 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 914 struct packet_sock *po, unsigned int status) 915 { 916 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 917 918 /* retire/close the current block */ 919 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 920 /* 921 * Plug the case where copy_bits() is in progress on 922 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 923 * have space to copy the pkt in the current block and 924 * called prb_retire_current_block() 925 * 926 * We don't need to worry about the TMO case because 927 * the timer-handler already handled this case. 928 */ 929 if (!(status & TP_STATUS_BLK_TMO)) { 930 while (atomic_read(&pkc->blk_fill_in_prog)) { 931 /* Waiting for skb_copy_bits to finish... */ 932 cpu_relax(); 933 } 934 } 935 prb_close_block(pkc, pbd, po, status); 936 return; 937 } 938 } 939 940 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) 941 { 942 return TP_STATUS_USER & BLOCK_STATUS(pbd); 943 } 944 945 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 946 { 947 return pkc->reset_pending_on_curr_blk; 948 } 949 950 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 951 { 952 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 953 atomic_dec(&pkc->blk_fill_in_prog); 954 } 955 956 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 957 struct tpacket3_hdr *ppd) 958 { 959 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 960 } 961 962 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 963 struct tpacket3_hdr *ppd) 964 { 965 ppd->hv1.tp_rxhash = 0; 966 } 967 968 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 969 struct tpacket3_hdr *ppd) 970 { 971 if (skb_vlan_tag_present(pkc->skb)) { 972 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 973 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 974 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 975 } else { 976 ppd->hv1.tp_vlan_tci = 0; 977 ppd->hv1.tp_vlan_tpid = 0; 978 ppd->tp_status = TP_STATUS_AVAILABLE; 979 } 980 } 981 982 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 983 struct tpacket3_hdr *ppd) 984 { 985 ppd->hv1.tp_padding = 0; 986 prb_fill_vlan_info(pkc, ppd); 987 988 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 989 prb_fill_rxhash(pkc, ppd); 990 else 991 prb_clear_rxhash(pkc, ppd); 992 } 993 994 static void prb_fill_curr_block(char *curr, 995 struct tpacket_kbdq_core *pkc, 996 struct tpacket_block_desc *pbd, 997 unsigned int len) 998 { 999 struct tpacket3_hdr *ppd; 1000 1001 ppd = (struct tpacket3_hdr *)curr; 1002 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 1003 pkc->prev = curr; 1004 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1005 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1006 BLOCK_NUM_PKTS(pbd) += 1; 1007 atomic_inc(&pkc->blk_fill_in_prog); 1008 prb_run_all_ft_ops(pkc, ppd); 1009 } 1010 1011 /* Assumes caller has the sk->rx_queue.lock */ 1012 static void *__packet_lookup_frame_in_block(struct packet_sock *po, 1013 struct sk_buff *skb, 1014 int status, 1015 unsigned int len 1016 ) 1017 { 1018 struct tpacket_kbdq_core *pkc; 1019 struct tpacket_block_desc *pbd; 1020 char *curr, *end; 1021 1022 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1023 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1024 1025 /* Queue is frozen when user space is lagging behind */ 1026 if (prb_queue_frozen(pkc)) { 1027 /* 1028 * Check if that last block which caused the 
queue to freeze, 1029 * is still in_use by user-space. 1030 */ 1031 if (prb_curr_blk_in_use(pbd)) { 1032 /* Can't record this packet */ 1033 return NULL; 1034 } else { 1035 /* 1036 * Ok, the block was released by user-space. 1037 * Now let's open that block. 1038 * opening a block also thaws the queue. 1039 * Thawing is a side effect. 1040 */ 1041 prb_open_block(pkc, pbd); 1042 } 1043 } 1044 1045 smp_mb(); 1046 curr = pkc->nxt_offset; 1047 pkc->skb = skb; 1048 end = (char *)pbd + pkc->kblk_size; 1049 1050 /* first try the current block */ 1051 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1052 prb_fill_curr_block(curr, pkc, pbd, len); 1053 return (void *)curr; 1054 } 1055 1056 /* Ok, close the current block */ 1057 prb_retire_current_block(pkc, po, 0); 1058 1059 /* Now, try to dispatch the next block */ 1060 curr = (char *)prb_dispatch_next_block(pkc, po); 1061 if (curr) { 1062 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1063 prb_fill_curr_block(curr, pkc, pbd, len); 1064 return (void *)curr; 1065 } 1066 1067 /* 1068 * No free blocks are available.user_space hasn't caught up yet. 1069 * Queue was just frozen and now this packet will get dropped. 1070 */ 1071 return NULL; 1072 } 1073 1074 static void *packet_current_rx_frame(struct packet_sock *po, 1075 struct sk_buff *skb, 1076 int status, unsigned int len) 1077 { 1078 char *curr = NULL; 1079 switch (po->tp_version) { 1080 case TPACKET_V1: 1081 case TPACKET_V2: 1082 curr = packet_lookup_frame(po, &po->rx_ring, 1083 po->rx_ring.head, status); 1084 return curr; 1085 case TPACKET_V3: 1086 return __packet_lookup_frame_in_block(po, skb, status, len); 1087 default: 1088 WARN(1, "TPACKET version not supported\n"); 1089 BUG(); 1090 return NULL; 1091 } 1092 } 1093 1094 static void *prb_lookup_block(struct packet_sock *po, 1095 struct packet_ring_buffer *rb, 1096 unsigned int idx, 1097 int status) 1098 { 1099 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1100 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1101 1102 if (status != BLOCK_STATUS(pbd)) 1103 return NULL; 1104 return pbd; 1105 } 1106 1107 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1108 { 1109 unsigned int prev; 1110 if (rb->prb_bdqc.kactive_blk_num) 1111 prev = rb->prb_bdqc.kactive_blk_num-1; 1112 else 1113 prev = rb->prb_bdqc.knum_blocks-1; 1114 return prev; 1115 } 1116 1117 /* Assumes caller has held the rx_queue.lock */ 1118 static void *__prb_previous_block(struct packet_sock *po, 1119 struct packet_ring_buffer *rb, 1120 int status) 1121 { 1122 unsigned int previous = prb_previous_blk_num(rb); 1123 return prb_lookup_block(po, rb, previous, status); 1124 } 1125 1126 static void *packet_previous_rx_frame(struct packet_sock *po, 1127 struct packet_ring_buffer *rb, 1128 int status) 1129 { 1130 if (po->tp_version <= TPACKET_V2) 1131 return packet_previous_frame(po, rb, status); 1132 1133 return __prb_previous_block(po, rb, status); 1134 } 1135 1136 static void packet_increment_rx_head(struct packet_sock *po, 1137 struct packet_ring_buffer *rb) 1138 { 1139 switch (po->tp_version) { 1140 case TPACKET_V1: 1141 case TPACKET_V2: 1142 return packet_increment_head(rb); 1143 case TPACKET_V3: 1144 default: 1145 WARN(1, "TPACKET version not supported.\n"); 1146 BUG(); 1147 return; 1148 } 1149 } 1150 1151 static void *packet_previous_frame(struct packet_sock *po, 1152 struct packet_ring_buffer *rb, 1153 int status) 1154 { 1155 unsigned int previous = rb->head ? 
rb->head - 1 : rb->frame_max; 1156 return packet_lookup_frame(po, rb, previous, status); 1157 } 1158 1159 static void packet_increment_head(struct packet_ring_buffer *buff) 1160 { 1161 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1162 } 1163 1164 static void packet_inc_pending(struct packet_ring_buffer *rb) 1165 { 1166 this_cpu_inc(*rb->pending_refcnt); 1167 } 1168 1169 static void packet_dec_pending(struct packet_ring_buffer *rb) 1170 { 1171 this_cpu_dec(*rb->pending_refcnt); 1172 } 1173 1174 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1175 { 1176 unsigned int refcnt = 0; 1177 int cpu; 1178 1179 /* We don't use pending refcount in rx_ring. */ 1180 if (rb->pending_refcnt == NULL) 1181 return 0; 1182 1183 for_each_possible_cpu(cpu) 1184 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1185 1186 return refcnt; 1187 } 1188 1189 static int packet_alloc_pending(struct packet_sock *po) 1190 { 1191 po->rx_ring.pending_refcnt = NULL; 1192 1193 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1194 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1195 return -ENOBUFS; 1196 1197 return 0; 1198 } 1199 1200 static void packet_free_pending(struct packet_sock *po) 1201 { 1202 free_percpu(po->tx_ring.pending_refcnt); 1203 } 1204 1205 #define ROOM_POW_OFF 2 1206 #define ROOM_NONE 0x0 1207 #define ROOM_LOW 0x1 1208 #define ROOM_NORMAL 0x2 1209 1210 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) 1211 { 1212 int idx, len; 1213 1214 len = po->rx_ring.frame_max + 1; 1215 idx = po->rx_ring.head; 1216 if (pow_off) 1217 idx += len >> pow_off; 1218 if (idx >= len) 1219 idx -= len; 1220 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1221 } 1222 1223 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) 1224 { 1225 int idx, len; 1226 1227 len = po->rx_ring.prb_bdqc.knum_blocks; 1228 idx = po->rx_ring.prb_bdqc.kactive_blk_num; 1229 if (pow_off) 1230 idx += len >> pow_off; 1231 if (idx >= len) 1232 idx -= len; 1233 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1234 } 1235 1236 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1237 { 1238 struct sock *sk = &po->sk; 1239 int ret = ROOM_NONE; 1240 1241 if (po->prot_hook.func != tpacket_rcv) { 1242 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) 1243 - (skb ? 
skb->truesize : 0); 1244 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) 1245 return ROOM_NORMAL; 1246 else if (avail > 0) 1247 return ROOM_LOW; 1248 else 1249 return ROOM_NONE; 1250 } 1251 1252 if (po->tp_version == TPACKET_V3) { 1253 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) 1254 ret = ROOM_NORMAL; 1255 else if (__tpacket_v3_has_room(po, 0)) 1256 ret = ROOM_LOW; 1257 } else { 1258 if (__tpacket_has_room(po, ROOM_POW_OFF)) 1259 ret = ROOM_NORMAL; 1260 else if (__tpacket_has_room(po, 0)) 1261 ret = ROOM_LOW; 1262 } 1263 1264 return ret; 1265 } 1266 1267 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1268 { 1269 int ret; 1270 bool has_room; 1271 1272 spin_lock_bh(&po->sk.sk_receive_queue.lock); 1273 ret = __packet_rcv_has_room(po, skb); 1274 has_room = ret == ROOM_NORMAL; 1275 if (po->pressure == has_room) 1276 po->pressure = !has_room; 1277 spin_unlock_bh(&po->sk.sk_receive_queue.lock); 1278 1279 return ret; 1280 } 1281 1282 static void packet_sock_destruct(struct sock *sk) 1283 { 1284 skb_queue_purge(&sk->sk_error_queue); 1285 1286 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1287 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); 1288 1289 if (!sock_flag(sk, SOCK_DEAD)) { 1290 pr_err("Attempt to release alive packet socket: %p\n", sk); 1291 return; 1292 } 1293 1294 sk_refcnt_debug_dec(sk); 1295 } 1296 1297 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) 1298 { 1299 u32 rxhash; 1300 int i, count = 0; 1301 1302 rxhash = skb_get_hash(skb); 1303 for (i = 0; i < ROLLOVER_HLEN; i++) 1304 if (po->rollover->history[i] == rxhash) 1305 count++; 1306 1307 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; 1308 return count > (ROLLOVER_HLEN >> 1); 1309 } 1310 1311 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1312 struct sk_buff *skb, 1313 unsigned int num) 1314 { 1315 return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1316 } 1317 1318 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1319 struct sk_buff *skb, 1320 unsigned int num) 1321 { 1322 unsigned int val = atomic_inc_return(&f->rr_cur); 1323 1324 return val % num; 1325 } 1326 1327 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1328 struct sk_buff *skb, 1329 unsigned int num) 1330 { 1331 return smp_processor_id() % num; 1332 } 1333 1334 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1335 struct sk_buff *skb, 1336 unsigned int num) 1337 { 1338 return prandom_u32_max(num); 1339 } 1340 1341 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1342 struct sk_buff *skb, 1343 unsigned int idx, bool try_self, 1344 unsigned int num) 1345 { 1346 struct packet_sock *po, *po_next, *po_skip = NULL; 1347 unsigned int i, j, room = ROOM_NONE; 1348 1349 po = pkt_sk(f->arr[idx]); 1350 1351 if (try_self) { 1352 room = packet_rcv_has_room(po, skb); 1353 if (room == ROOM_NORMAL || 1354 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) 1355 return idx; 1356 po_skip = po; 1357 } 1358 1359 i = j = min_t(int, po->rollover->sock, num - 1); 1360 do { 1361 po_next = pkt_sk(f->arr[i]); 1362 if (po_next != po_skip && !po_next->pressure && 1363 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { 1364 if (i != j) 1365 po->rollover->sock = i; 1366 atomic_long_inc(&po->rollover->num); 1367 if (room == ROOM_LOW) 1368 atomic_long_inc(&po->rollover->num_huge); 1369 return i; 1370 } 1371 1372 if (++i == num) 1373 i = 0; 1374 } while (i != j); 1375 1376 atomic_long_inc(&po->rollover->num_failed); 1377 return idx; 1378 } 1379 1380 static 
unsigned int fanout_demux_qm(struct packet_fanout *f, 1381 struct sk_buff *skb, 1382 unsigned int num) 1383 { 1384 return skb_get_queue_mapping(skb) % num; 1385 } 1386 1387 static unsigned int fanout_demux_bpf(struct packet_fanout *f, 1388 struct sk_buff *skb, 1389 unsigned int num) 1390 { 1391 struct bpf_prog *prog; 1392 unsigned int ret = 0; 1393 1394 rcu_read_lock(); 1395 prog = rcu_dereference(f->bpf_prog); 1396 if (prog) 1397 ret = bpf_prog_run_clear_cb(prog, skb) % num; 1398 rcu_read_unlock(); 1399 1400 return ret; 1401 } 1402 1403 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1404 { 1405 return f->flags & (flag >> 8); 1406 } 1407 1408 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1409 struct packet_type *pt, struct net_device *orig_dev) 1410 { 1411 struct packet_fanout *f = pt->af_packet_priv; 1412 unsigned int num = READ_ONCE(f->num_members); 1413 struct net *net = read_pnet(&f->net); 1414 struct packet_sock *po; 1415 unsigned int idx; 1416 1417 if (!net_eq(dev_net(dev), net) || !num) { 1418 kfree_skb(skb); 1419 return 0; 1420 } 1421 1422 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1423 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); 1424 if (!skb) 1425 return 0; 1426 } 1427 switch (f->type) { 1428 case PACKET_FANOUT_HASH: 1429 default: 1430 idx = fanout_demux_hash(f, skb, num); 1431 break; 1432 case PACKET_FANOUT_LB: 1433 idx = fanout_demux_lb(f, skb, num); 1434 break; 1435 case PACKET_FANOUT_CPU: 1436 idx = fanout_demux_cpu(f, skb, num); 1437 break; 1438 case PACKET_FANOUT_RND: 1439 idx = fanout_demux_rnd(f, skb, num); 1440 break; 1441 case PACKET_FANOUT_QM: 1442 idx = fanout_demux_qm(f, skb, num); 1443 break; 1444 case PACKET_FANOUT_ROLLOVER: 1445 idx = fanout_demux_rollover(f, skb, 0, false, num); 1446 break; 1447 case PACKET_FANOUT_CBPF: 1448 case PACKET_FANOUT_EBPF: 1449 idx = fanout_demux_bpf(f, skb, num); 1450 break; 1451 } 1452 1453 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) 1454 idx = fanout_demux_rollover(f, skb, idx, true, num); 1455 1456 po = pkt_sk(f->arr[idx]); 1457 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1458 } 1459 1460 DEFINE_MUTEX(fanout_mutex); 1461 EXPORT_SYMBOL_GPL(fanout_mutex); 1462 static LIST_HEAD(fanout_list); 1463 static u16 fanout_next_id; 1464 1465 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1466 { 1467 struct packet_fanout *f = po->fanout; 1468 1469 spin_lock(&f->lock); 1470 f->arr[f->num_members] = sk; 1471 smp_wmb(); 1472 f->num_members++; 1473 if (f->num_members == 1) 1474 dev_add_pack(&f->prot_hook); 1475 spin_unlock(&f->lock); 1476 } 1477 1478 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1479 { 1480 struct packet_fanout *f = po->fanout; 1481 int i; 1482 1483 spin_lock(&f->lock); 1484 for (i = 0; i < f->num_members; i++) { 1485 if (f->arr[i] == sk) 1486 break; 1487 } 1488 BUG_ON(i >= f->num_members); 1489 f->arr[i] = f->arr[f->num_members - 1]; 1490 f->num_members--; 1491 if (f->num_members == 0) 1492 __dev_remove_pack(&f->prot_hook); 1493 spin_unlock(&f->lock); 1494 } 1495 1496 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1497 { 1498 if (sk->sk_family != PF_PACKET) 1499 return false; 1500 1501 return ptype->af_packet_priv == pkt_sk(sk)->fanout; 1502 } 1503 1504 static void fanout_init_data(struct packet_fanout *f) 1505 { 1506 switch (f->type) { 1507 case PACKET_FANOUT_LB: 1508 atomic_set(&f->rr_cur, 0); 1509 break; 1510 case PACKET_FANOUT_CBPF: 1511 case PACKET_FANOUT_EBPF: 1512 
RCU_INIT_POINTER(f->bpf_prog, NULL); 1513 break; 1514 } 1515 } 1516 1517 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) 1518 { 1519 struct bpf_prog *old; 1520 1521 spin_lock(&f->lock); 1522 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); 1523 rcu_assign_pointer(f->bpf_prog, new); 1524 spin_unlock(&f->lock); 1525 1526 if (old) { 1527 synchronize_net(); 1528 bpf_prog_destroy(old); 1529 } 1530 } 1531 1532 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, 1533 unsigned int len) 1534 { 1535 struct bpf_prog *new; 1536 struct sock_fprog fprog; 1537 int ret; 1538 1539 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1540 return -EPERM; 1541 if (len != sizeof(fprog)) 1542 return -EINVAL; 1543 if (copy_from_user(&fprog, data, len)) 1544 return -EFAULT; 1545 1546 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); 1547 if (ret) 1548 return ret; 1549 1550 __fanout_set_data_bpf(po->fanout, new); 1551 return 0; 1552 } 1553 1554 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, 1555 unsigned int len) 1556 { 1557 struct bpf_prog *new; 1558 u32 fd; 1559 1560 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1561 return -EPERM; 1562 if (len != sizeof(fd)) 1563 return -EINVAL; 1564 if (copy_from_user(&fd, data, len)) 1565 return -EFAULT; 1566 1567 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 1568 if (IS_ERR(new)) 1569 return PTR_ERR(new); 1570 1571 __fanout_set_data_bpf(po->fanout, new); 1572 return 0; 1573 } 1574 1575 static int fanout_set_data(struct packet_sock *po, char __user *data, 1576 unsigned int len) 1577 { 1578 switch (po->fanout->type) { 1579 case PACKET_FANOUT_CBPF: 1580 return fanout_set_data_cbpf(po, data, len); 1581 case PACKET_FANOUT_EBPF: 1582 return fanout_set_data_ebpf(po, data, len); 1583 default: 1584 return -EINVAL; 1585 } 1586 } 1587 1588 static void fanout_release_data(struct packet_fanout *f) 1589 { 1590 switch (f->type) { 1591 case PACKET_FANOUT_CBPF: 1592 case PACKET_FANOUT_EBPF: 1593 __fanout_set_data_bpf(f, NULL); 1594 } 1595 } 1596 1597 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) 1598 { 1599 struct packet_fanout *f; 1600 1601 list_for_each_entry(f, &fanout_list, list) { 1602 if (f->id == candidate_id && 1603 read_pnet(&f->net) == sock_net(sk)) { 1604 return false; 1605 } 1606 } 1607 return true; 1608 } 1609 1610 static bool fanout_find_new_id(struct sock *sk, u16 *new_id) 1611 { 1612 u16 id = fanout_next_id; 1613 1614 do { 1615 if (__fanout_id_is_free(sk, id)) { 1616 *new_id = id; 1617 fanout_next_id = id + 1; 1618 return true; 1619 } 1620 1621 id++; 1622 } while (id != fanout_next_id); 1623 1624 return false; 1625 } 1626 1627 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1628 { 1629 struct packet_rollover *rollover = NULL; 1630 struct packet_sock *po = pkt_sk(sk); 1631 struct packet_fanout *f, *match; 1632 u8 type = type_flags & 0xff; 1633 u8 flags = type_flags >> 8; 1634 int err; 1635 1636 switch (type) { 1637 case PACKET_FANOUT_ROLLOVER: 1638 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1639 return -EINVAL; 1640 case PACKET_FANOUT_HASH: 1641 case PACKET_FANOUT_LB: 1642 case PACKET_FANOUT_CPU: 1643 case PACKET_FANOUT_RND: 1644 case PACKET_FANOUT_QM: 1645 case PACKET_FANOUT_CBPF: 1646 case PACKET_FANOUT_EBPF: 1647 break; 1648 default: 1649 return -EINVAL; 1650 } 1651 1652 mutex_lock(&fanout_mutex); 1653 1654 err = -EALREADY; 1655 if (po->fanout) 1656 goto out; 1657 1658 if (type == PACKET_FANOUT_ROLLOVER || 1659 
(type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1660 err = -ENOMEM; 1661 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); 1662 if (!rollover) 1663 goto out; 1664 atomic_long_set(&rollover->num, 0); 1665 atomic_long_set(&rollover->num_huge, 0); 1666 atomic_long_set(&rollover->num_failed, 0); 1667 } 1668 1669 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { 1670 if (id != 0) { 1671 err = -EINVAL; 1672 goto out; 1673 } 1674 if (!fanout_find_new_id(sk, &id)) { 1675 err = -ENOMEM; 1676 goto out; 1677 } 1678 /* ephemeral flag for the first socket in the group: drop it */ 1679 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); 1680 } 1681 1682 match = NULL; 1683 list_for_each_entry(f, &fanout_list, list) { 1684 if (f->id == id && 1685 read_pnet(&f->net) == sock_net(sk)) { 1686 match = f; 1687 break; 1688 } 1689 } 1690 err = -EINVAL; 1691 if (match && match->flags != flags) 1692 goto out; 1693 if (!match) { 1694 err = -ENOMEM; 1695 match = kzalloc(sizeof(*match), GFP_KERNEL); 1696 if (!match) 1697 goto out; 1698 write_pnet(&match->net, sock_net(sk)); 1699 match->id = id; 1700 match->type = type; 1701 match->flags = flags; 1702 INIT_LIST_HEAD(&match->list); 1703 spin_lock_init(&match->lock); 1704 refcount_set(&match->sk_ref, 0); 1705 fanout_init_data(match); 1706 match->prot_hook.type = po->prot_hook.type; 1707 match->prot_hook.dev = po->prot_hook.dev; 1708 match->prot_hook.func = packet_rcv_fanout; 1709 match->prot_hook.af_packet_priv = match; 1710 match->prot_hook.id_match = match_fanout_group; 1711 list_add(&match->list, &fanout_list); 1712 } 1713 err = -EINVAL; 1714 1715 spin_lock(&po->bind_lock); 1716 if (po->running && 1717 match->type == type && 1718 match->prot_hook.type == po->prot_hook.type && 1719 match->prot_hook.dev == po->prot_hook.dev) { 1720 err = -ENOSPC; 1721 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { 1722 __dev_remove_pack(&po->prot_hook); 1723 po->fanout = match; 1724 po->rollover = rollover; 1725 rollover = NULL; 1726 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); 1727 __fanout_link(sk, po); 1728 err = 0; 1729 } 1730 } 1731 spin_unlock(&po->bind_lock); 1732 1733 if (err && !refcount_read(&match->sk_ref)) { 1734 list_del(&match->list); 1735 kfree(match); 1736 } 1737 1738 out: 1739 kfree(rollover); 1740 mutex_unlock(&fanout_mutex); 1741 return err; 1742 } 1743 1744 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes 1745 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 1746 * It is the responsibility of the caller to call fanout_release_data() and 1747 * free the returned packet_fanout (after synchronize_net()) 1748 */ 1749 static struct packet_fanout *fanout_release(struct sock *sk) 1750 { 1751 struct packet_sock *po = pkt_sk(sk); 1752 struct packet_fanout *f; 1753 1754 mutex_lock(&fanout_mutex); 1755 f = po->fanout; 1756 if (f) { 1757 po->fanout = NULL; 1758 1759 if (refcount_dec_and_test(&f->sk_ref)) 1760 list_del(&f->list); 1761 else 1762 f = NULL; 1763 } 1764 mutex_unlock(&fanout_mutex); 1765 1766 return f; 1767 } 1768 1769 static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1770 struct sk_buff *skb) 1771 { 1772 /* Earlier code assumed this would be a VLAN pkt, double-check 1773 * this now that we have the actual packet in hand. We can only 1774 * do this check on Ethernet devices. 
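 * Returning true lets the caller accept a frame that exceeds
 * mtu + hard_header_len by the size of a VLAN tag.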
1775 */ 1776 if (unlikely(dev->type != ARPHRD_ETHER)) 1777 return false; 1778 1779 skb_reset_mac_header(skb); 1780 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); 1781 } 1782 1783 static const struct proto_ops packet_ops; 1784 1785 static const struct proto_ops packet_ops_spkt; 1786 1787 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1788 struct packet_type *pt, struct net_device *orig_dev) 1789 { 1790 struct sock *sk; 1791 struct sockaddr_pkt *spkt; 1792 1793 /* 1794 * When we registered the protocol we saved the socket in the data 1795 * field for just this event. 1796 */ 1797 1798 sk = pt->af_packet_priv; 1799 1800 /* 1801 * Yank back the headers [hope the device set this 1802 * right or kerboom...] 1803 * 1804 * Incoming packets have ll header pulled, 1805 * push it back. 1806 * 1807 * For outgoing ones skb->data == skb_mac_header(skb) 1808 * so that this procedure is noop. 1809 */ 1810 1811 if (skb->pkt_type == PACKET_LOOPBACK) 1812 goto out; 1813 1814 if (!net_eq(dev_net(dev), sock_net(sk))) 1815 goto out; 1816 1817 skb = skb_share_check(skb, GFP_ATOMIC); 1818 if (skb == NULL) 1819 goto oom; 1820 1821 /* drop any routing info */ 1822 skb_dst_drop(skb); 1823 1824 /* drop conntrack reference */ 1825 nf_reset(skb); 1826 1827 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1828 1829 skb_push(skb, skb->data - skb_mac_header(skb)); 1830 1831 /* 1832 * The SOCK_PACKET socket receives _all_ frames. 1833 */ 1834 1835 spkt->spkt_family = dev->type; 1836 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1837 spkt->spkt_protocol = skb->protocol; 1838 1839 /* 1840 * Charge the memory to the socket. This is done specifically 1841 * to prevent sockets using all the memory up. 1842 */ 1843 1844 if (sock_queue_rcv_skb(sk, skb) == 0) 1845 return 0; 1846 1847 out: 1848 kfree_skb(skb); 1849 oom: 1850 return 0; 1851 } 1852 1853 1854 /* 1855 * Output a raw packet to a device layer. This bypasses all the other 1856 * protocol layers and you must therefore supply it with a complete frame 1857 */ 1858 1859 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, 1860 size_t len) 1861 { 1862 struct sock *sk = sock->sk; 1863 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1864 struct sk_buff *skb = NULL; 1865 struct net_device *dev; 1866 struct sockcm_cookie sockc; 1867 __be16 proto = 0; 1868 int err; 1869 int extra_len = 0; 1870 1871 /* 1872 * Get and verify the address. 1873 */ 1874 1875 if (saddr) { 1876 if (msg->msg_namelen < sizeof(struct sockaddr)) 1877 return -EINVAL; 1878 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1879 proto = saddr->spkt_protocol; 1880 } else 1881 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1882 1883 /* 1884 * Find the device first to size check it 1885 */ 1886 1887 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1888 retry: 1889 rcu_read_lock(); 1890 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1891 err = -ENODEV; 1892 if (dev == NULL) 1893 goto out_unlock; 1894 1895 err = -ENETDOWN; 1896 if (!(dev->flags & IFF_UP)) 1897 goto out_unlock; 1898 1899 /* 1900 * You may not queue a frame bigger than the mtu. This is the lowest level 1901 * raw protocol and you must do your own fragmentation at this level. 
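 *
 * A minimal userspace sketch of this path (illustrative only, not part of
 * this file; "eth0" and the frame buffer are placeholders):
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 *
 * The frame passed in must already carry its full link-layer header and
 * may not exceed dev->mtu + dev->hard_header_len (plus a VLAN tag, as
 * checked further below).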
1902 */ 1903 1904 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1905 if (!netif_supports_nofcs(dev)) { 1906 err = -EPROTONOSUPPORT; 1907 goto out_unlock; 1908 } 1909 extra_len = 4; /* We're doing our own CRC */ 1910 } 1911 1912 err = -EMSGSIZE; 1913 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 1914 goto out_unlock; 1915 1916 if (!skb) { 1917 size_t reserved = LL_RESERVED_SPACE(dev); 1918 int tlen = dev->needed_tailroom; 1919 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 1920 1921 rcu_read_unlock(); 1922 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 1923 if (skb == NULL) 1924 return -ENOBUFS; 1925 /* FIXME: Save some space for broken drivers that write a hard 1926 * header at transmission time by themselves. PPP is the notable 1927 * one here. This should really be fixed at the driver level. 1928 */ 1929 skb_reserve(skb, reserved); 1930 skb_reset_network_header(skb); 1931 1932 /* Try to align data part correctly */ 1933 if (hhlen) { 1934 skb->data -= hhlen; 1935 skb->tail -= hhlen; 1936 if (len < hhlen) 1937 skb_reset_network_header(skb); 1938 } 1939 err = memcpy_from_msg(skb_put(skb, len), msg, len); 1940 if (err) 1941 goto out_free; 1942 goto retry; 1943 } 1944 1945 if (!dev_validate_header(dev, skb->data, len)) { 1946 err = -EINVAL; 1947 goto out_unlock; 1948 } 1949 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 1950 !packet_extra_vlan_len_allowed(dev, skb)) { 1951 err = -EMSGSIZE; 1952 goto out_unlock; 1953 } 1954 1955 sockcm_init(&sockc, sk); 1956 if (msg->msg_controllen) { 1957 err = sock_cmsg_send(sk, msg, &sockc); 1958 if (unlikely(err)) 1959 goto out_unlock; 1960 } 1961 1962 skb->protocol = proto; 1963 skb->dev = dev; 1964 skb->priority = sk->sk_priority; 1965 skb->mark = sk->sk_mark; 1966 skb->tstamp = sockc.transmit_time; 1967 1968 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); 1969 1970 if (unlikely(extra_len == 4)) 1971 skb->no_fcs = 1; 1972 1973 skb_probe_transport_header(skb, 0); 1974 1975 dev_queue_xmit(skb); 1976 rcu_read_unlock(); 1977 return len; 1978 1979 out_unlock: 1980 rcu_read_unlock(); 1981 out_free: 1982 kfree_skb(skb); 1983 return err; 1984 } 1985 1986 static unsigned int run_filter(struct sk_buff *skb, 1987 const struct sock *sk, 1988 unsigned int res) 1989 { 1990 struct sk_filter *filter; 1991 1992 rcu_read_lock(); 1993 filter = rcu_dereference(sk->sk_filter); 1994 if (filter != NULL) 1995 res = bpf_prog_run_clear_cb(filter->prog, skb); 1996 rcu_read_unlock(); 1997 1998 return res; 1999 } 2000 2001 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 2002 size_t *len) 2003 { 2004 struct virtio_net_hdr vnet_hdr; 2005 2006 if (*len < sizeof(vnet_hdr)) 2007 return -EINVAL; 2008 *len -= sizeof(vnet_hdr); 2009 2010 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) 2011 return -EINVAL; 2012 2013 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 2014 } 2015 2016 /* 2017 * This function makes lazy skb cloning in hope that most of packets 2018 * are discarded by BPF. 2019 * 2020 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 2021 * and skb->cb are mangled. It works because (and until) packets 2022 * falling here are owned by current CPU. Output packets are cloned 2023 * by dev_queue_xmit_nit(), input packets are processed by net_bh 2024 * sequencially, so that if we return skb to original state on exit, 2025 * we will not harm anyone. 
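 * (See the drop_n_restore label below: for a shared skb, skb->data and
 * skb->len are put back before the skb is released.)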
2026 */ 2027 2028 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 2029 struct packet_type *pt, struct net_device *orig_dev) 2030 { 2031 struct sock *sk; 2032 struct sockaddr_ll *sll; 2033 struct packet_sock *po; 2034 u8 *skb_head = skb->data; 2035 int skb_len = skb->len; 2036 unsigned int snaplen, res; 2037 bool is_drop_n_account = false; 2038 2039 if (skb->pkt_type == PACKET_LOOPBACK) 2040 goto drop; 2041 2042 sk = pt->af_packet_priv; 2043 po = pkt_sk(sk); 2044 2045 if (!net_eq(dev_net(dev), sock_net(sk))) 2046 goto drop; 2047 2048 skb->dev = dev; 2049 2050 if (dev->header_ops) { 2051 /* The device has an explicit notion of ll header, 2052 * exported to higher levels. 2053 * 2054 * Otherwise, the device hides details of its frame 2055 * structure, so that corresponding packet head is 2056 * never delivered to user. 2057 */ 2058 if (sk->sk_type != SOCK_DGRAM) 2059 skb_push(skb, skb->data - skb_mac_header(skb)); 2060 else if (skb->pkt_type == PACKET_OUTGOING) { 2061 /* Special case: outgoing packets have ll header at head */ 2062 skb_pull(skb, skb_network_offset(skb)); 2063 } 2064 } 2065 2066 snaplen = skb->len; 2067 2068 res = run_filter(skb, sk, snaplen); 2069 if (!res) 2070 goto drop_n_restore; 2071 if (snaplen > res) 2072 snaplen = res; 2073 2074 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2075 goto drop_n_acct; 2076 2077 if (skb_shared(skb)) { 2078 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2079 if (nskb == NULL) 2080 goto drop_n_acct; 2081 2082 if (skb_head != skb->data) { 2083 skb->data = skb_head; 2084 skb->len = skb_len; 2085 } 2086 consume_skb(skb); 2087 skb = nskb; 2088 } 2089 2090 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2091 2092 sll = &PACKET_SKB_CB(skb)->sa.ll; 2093 sll->sll_hatype = dev->type; 2094 sll->sll_pkttype = skb->pkt_type; 2095 if (unlikely(po->origdev)) 2096 sll->sll_ifindex = orig_dev->ifindex; 2097 else 2098 sll->sll_ifindex = dev->ifindex; 2099 2100 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2101 2102 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2103 * Use their space for storing the original skb length. 
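 * (This relies on the union in struct packet_skb_cb above, where
 * origlen aliases the first bytes of struct sockaddr_ll.)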
2104 */ 2105 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2106 2107 if (pskb_trim(skb, snaplen)) 2108 goto drop_n_acct; 2109 2110 skb_set_owner_r(skb, sk); 2111 skb->dev = NULL; 2112 skb_dst_drop(skb); 2113 2114 /* drop conntrack reference */ 2115 nf_reset(skb); 2116 2117 spin_lock(&sk->sk_receive_queue.lock); 2118 po->stats.stats1.tp_packets++; 2119 sock_skb_set_dropcount(sk, skb); 2120 __skb_queue_tail(&sk->sk_receive_queue, skb); 2121 spin_unlock(&sk->sk_receive_queue.lock); 2122 sk->sk_data_ready(sk); 2123 return 0; 2124 2125 drop_n_acct: 2126 is_drop_n_account = true; 2127 spin_lock(&sk->sk_receive_queue.lock); 2128 po->stats.stats1.tp_drops++; 2129 atomic_inc(&sk->sk_drops); 2130 spin_unlock(&sk->sk_receive_queue.lock); 2131 2132 drop_n_restore: 2133 if (skb_head != skb->data && skb_shared(skb)) { 2134 skb->data = skb_head; 2135 skb->len = skb_len; 2136 } 2137 drop: 2138 if (!is_drop_n_account) 2139 consume_skb(skb); 2140 else 2141 kfree_skb(skb); 2142 return 0; 2143 } 2144 2145 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2146 struct packet_type *pt, struct net_device *orig_dev) 2147 { 2148 struct sock *sk; 2149 struct packet_sock *po; 2150 struct sockaddr_ll *sll; 2151 union tpacket_uhdr h; 2152 u8 *skb_head = skb->data; 2153 int skb_len = skb->len; 2154 unsigned int snaplen, res; 2155 unsigned long status = TP_STATUS_USER; 2156 unsigned short macoff, netoff, hdrlen; 2157 struct sk_buff *copy_skb = NULL; 2158 struct timespec ts; 2159 __u32 ts_status; 2160 bool is_drop_n_account = false; 2161 bool do_vnet = false; 2162 2163 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2164 * We may add members to them until current aligned size without forcing 2165 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 2166 */ 2167 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2168 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2169 2170 if (skb->pkt_type == PACKET_LOOPBACK) 2171 goto drop; 2172 2173 sk = pt->af_packet_priv; 2174 po = pkt_sk(sk); 2175 2176 if (!net_eq(dev_net(dev), sock_net(sk))) 2177 goto drop; 2178 2179 if (dev->header_ops) { 2180 if (sk->sk_type != SOCK_DGRAM) 2181 skb_push(skb, skb->data - skb_mac_header(skb)); 2182 else if (skb->pkt_type == PACKET_OUTGOING) { 2183 /* Special case: outgoing packets have ll header at head */ 2184 skb_pull(skb, skb_network_offset(skb)); 2185 } 2186 } 2187 2188 snaplen = skb->len; 2189 2190 res = run_filter(skb, sk, snaplen); 2191 if (!res) 2192 goto drop_n_restore; 2193 2194 if (skb->ip_summed == CHECKSUM_PARTIAL) 2195 status |= TP_STATUS_CSUMNOTREADY; 2196 else if (skb->pkt_type != PACKET_OUTGOING && 2197 (skb->ip_summed == CHECKSUM_COMPLETE || 2198 skb_csum_unnecessary(skb))) 2199 status |= TP_STATUS_CSUM_VALID; 2200 2201 if (snaplen > res) 2202 snaplen = res; 2203 2204 if (sk->sk_type == SOCK_DGRAM) { 2205 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2206 po->tp_reserve; 2207 } else { 2208 unsigned int maclen = skb_network_offset(skb); 2209 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2210 (maclen < 16 ? 
16 : maclen)) + 2211 po->tp_reserve; 2212 if (po->has_vnet_hdr) { 2213 netoff += sizeof(struct virtio_net_hdr); 2214 do_vnet = true; 2215 } 2216 macoff = netoff - maclen; 2217 } 2218 if (po->tp_version <= TPACKET_V2) { 2219 if (macoff + snaplen > po->rx_ring.frame_size) { 2220 if (po->copy_thresh && 2221 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2222 if (skb_shared(skb)) { 2223 copy_skb = skb_clone(skb, GFP_ATOMIC); 2224 } else { 2225 copy_skb = skb_get(skb); 2226 skb_head = skb->data; 2227 } 2228 if (copy_skb) 2229 skb_set_owner_r(copy_skb, sk); 2230 } 2231 snaplen = po->rx_ring.frame_size - macoff; 2232 if ((int)snaplen < 0) { 2233 snaplen = 0; 2234 do_vnet = false; 2235 } 2236 } 2237 } else if (unlikely(macoff + snaplen > 2238 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2239 u32 nval; 2240 2241 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2242 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 2243 snaplen, nval, macoff); 2244 snaplen = nval; 2245 if (unlikely((int)snaplen < 0)) { 2246 snaplen = 0; 2247 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2248 do_vnet = false; 2249 } 2250 } 2251 spin_lock(&sk->sk_receive_queue.lock); 2252 h.raw = packet_current_rx_frame(po, skb, 2253 TP_STATUS_KERNEL, (macoff+snaplen)); 2254 if (!h.raw) 2255 goto drop_n_account; 2256 if (po->tp_version <= TPACKET_V2) { 2257 packet_increment_rx_head(po, &po->rx_ring); 2258 /* 2259 * LOSING will be reported till you read the stats, 2260 * because it's COR - Clear On Read. 2261 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2262 * at packet level. 2263 */ 2264 if (po->stats.stats1.tp_drops) 2265 status |= TP_STATUS_LOSING; 2266 } 2267 2268 if (do_vnet && 2269 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2270 sizeof(struct virtio_net_hdr), 2271 vio_le(), true, 0)) 2272 goto drop_n_account; 2273 2274 po->stats.stats1.tp_packets++; 2275 if (copy_skb) { 2276 status |= TP_STATUS_COPY; 2277 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2278 } 2279 spin_unlock(&sk->sk_receive_queue.lock); 2280 2281 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2282 2283 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 2284 getnstimeofday(&ts); 2285 2286 status |= ts_status; 2287 2288 switch (po->tp_version) { 2289 case TPACKET_V1: 2290 h.h1->tp_len = skb->len; 2291 h.h1->tp_snaplen = snaplen; 2292 h.h1->tp_mac = macoff; 2293 h.h1->tp_net = netoff; 2294 h.h1->tp_sec = ts.tv_sec; 2295 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2296 hdrlen = sizeof(*h.h1); 2297 break; 2298 case TPACKET_V2: 2299 h.h2->tp_len = skb->len; 2300 h.h2->tp_snaplen = snaplen; 2301 h.h2->tp_mac = macoff; 2302 h.h2->tp_net = netoff; 2303 h.h2->tp_sec = ts.tv_sec; 2304 h.h2->tp_nsec = ts.tv_nsec; 2305 if (skb_vlan_tag_present(skb)) { 2306 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2307 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2308 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2309 } else { 2310 h.h2->tp_vlan_tci = 0; 2311 h.h2->tp_vlan_tpid = 0; 2312 } 2313 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2314 hdrlen = sizeof(*h.h2); 2315 break; 2316 case TPACKET_V3: 2317 /* tp_nxt_offset,vlan are already populated above. 
2318 * So DONT clear those fields here 2319 */ 2320 h.h3->tp_status |= status; 2321 h.h3->tp_len = skb->len; 2322 h.h3->tp_snaplen = snaplen; 2323 h.h3->tp_mac = macoff; 2324 h.h3->tp_net = netoff; 2325 h.h3->tp_sec = ts.tv_sec; 2326 h.h3->tp_nsec = ts.tv_nsec; 2327 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2328 hdrlen = sizeof(*h.h3); 2329 break; 2330 default: 2331 BUG(); 2332 } 2333 2334 sll = h.raw + TPACKET_ALIGN(hdrlen); 2335 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2336 sll->sll_family = AF_PACKET; 2337 sll->sll_hatype = dev->type; 2338 sll->sll_protocol = skb->protocol; 2339 sll->sll_pkttype = skb->pkt_type; 2340 if (unlikely(po->origdev)) 2341 sll->sll_ifindex = orig_dev->ifindex; 2342 else 2343 sll->sll_ifindex = dev->ifindex; 2344 2345 smp_mb(); 2346 2347 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2348 if (po->tp_version <= TPACKET_V2) { 2349 u8 *start, *end; 2350 2351 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2352 macoff + snaplen); 2353 2354 for (start = h.raw; start < end; start += PAGE_SIZE) 2355 flush_dcache_page(pgv_to_page(start)); 2356 } 2357 smp_wmb(); 2358 #endif 2359 2360 if (po->tp_version <= TPACKET_V2) { 2361 __packet_set_status(po, h.raw, status); 2362 sk->sk_data_ready(sk); 2363 } else { 2364 prb_clear_blk_fill_status(&po->rx_ring); 2365 } 2366 2367 drop_n_restore: 2368 if (skb_head != skb->data && skb_shared(skb)) { 2369 skb->data = skb_head; 2370 skb->len = skb_len; 2371 } 2372 drop: 2373 if (!is_drop_n_account) 2374 consume_skb(skb); 2375 else 2376 kfree_skb(skb); 2377 return 0; 2378 2379 drop_n_account: 2380 is_drop_n_account = true; 2381 po->stats.stats1.tp_drops++; 2382 spin_unlock(&sk->sk_receive_queue.lock); 2383 2384 sk->sk_data_ready(sk); 2385 kfree_skb(copy_skb); 2386 goto drop_n_restore; 2387 } 2388 2389 static void tpacket_destruct_skb(struct sk_buff *skb) 2390 { 2391 struct packet_sock *po = pkt_sk(skb->sk); 2392 2393 if (likely(po->tx_ring.pg_vec)) { 2394 void *ph; 2395 __u32 ts; 2396 2397 ph = skb_shinfo(skb)->destructor_arg; 2398 packet_dec_pending(&po->tx_ring); 2399 2400 ts = __packet_set_timestamp(po, ph, skb); 2401 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2402 } 2403 2404 sock_wfree(skb); 2405 } 2406 2407 static void tpacket_set_protocol(const struct net_device *dev, 2408 struct sk_buff *skb) 2409 { 2410 if (dev->type == ARPHRD_ETHER) { 2411 skb_reset_mac_header(skb); 2412 skb->protocol = eth_hdr(skb)->h_proto; 2413 } 2414 } 2415 2416 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2417 { 2418 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2419 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2420 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2421 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2422 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2423 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2424 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2425 2426 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2427 return -EINVAL; 2428 2429 return 0; 2430 } 2431 2432 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2433 struct virtio_net_hdr *vnet_hdr) 2434 { 2435 if (*len < sizeof(*vnet_hdr)) 2436 return -EINVAL; 2437 *len -= sizeof(*vnet_hdr); 2438 2439 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2440 return -EFAULT; 2441 2442 return __packet_snd_vnet_parse(vnet_hdr, *len); 2443 } 2444 2445 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2446 void 
*frame, struct net_device *dev, void *data, int tp_len, 2447 __be16 proto, unsigned char *addr, int hlen, int copylen, 2448 const struct sockcm_cookie *sockc) 2449 { 2450 union tpacket_uhdr ph; 2451 int to_write, offset, len, nr_frags, len_max; 2452 struct socket *sock = po->sk.sk_socket; 2453 struct page *page; 2454 int err; 2455 2456 ph.raw = frame; 2457 2458 skb->protocol = proto; 2459 skb->dev = dev; 2460 skb->priority = po->sk.sk_priority; 2461 skb->mark = po->sk.sk_mark; 2462 skb->tstamp = sockc->transmit_time; 2463 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 2464 skb_shinfo(skb)->destructor_arg = ph.raw; 2465 2466 skb_reserve(skb, hlen); 2467 skb_reset_network_header(skb); 2468 2469 to_write = tp_len; 2470 2471 if (sock->type == SOCK_DGRAM) { 2472 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2473 NULL, tp_len); 2474 if (unlikely(err < 0)) 2475 return -EINVAL; 2476 } else if (copylen) { 2477 int hdrlen = min_t(int, copylen, tp_len); 2478 2479 skb_push(skb, dev->hard_header_len); 2480 skb_put(skb, copylen - dev->hard_header_len); 2481 err = skb_store_bits(skb, 0, data, hdrlen); 2482 if (unlikely(err)) 2483 return err; 2484 if (!dev_validate_header(dev, skb->data, hdrlen)) 2485 return -EINVAL; 2486 if (!skb->protocol) 2487 tpacket_set_protocol(dev, skb); 2488 2489 data += hdrlen; 2490 to_write -= hdrlen; 2491 } 2492 2493 offset = offset_in_page(data); 2494 len_max = PAGE_SIZE - offset; 2495 len = ((to_write > len_max) ? len_max : to_write); 2496 2497 skb->data_len = to_write; 2498 skb->len += to_write; 2499 skb->truesize += to_write; 2500 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2501 2502 while (likely(to_write)) { 2503 nr_frags = skb_shinfo(skb)->nr_frags; 2504 2505 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2506 pr_err("Packet exceed the number of skb frags(%lu)\n", 2507 MAX_SKB_FRAGS); 2508 return -EFAULT; 2509 } 2510 2511 page = pgv_to_page(data); 2512 data += len; 2513 flush_dcache_page(page); 2514 get_page(page); 2515 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2516 to_write -= len; 2517 offset = 0; 2518 len_max = PAGE_SIZE; 2519 len = ((to_write > len_max) ? 
len_max : to_write); 2520 } 2521 2522 skb_probe_transport_header(skb, 0); 2523 2524 return tp_len; 2525 } 2526 2527 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2528 int size_max, void **data) 2529 { 2530 union tpacket_uhdr ph; 2531 int tp_len, off; 2532 2533 ph.raw = frame; 2534 2535 switch (po->tp_version) { 2536 case TPACKET_V3: 2537 if (ph.h3->tp_next_offset != 0) { 2538 pr_warn_once("variable sized slot not supported"); 2539 return -EINVAL; 2540 } 2541 tp_len = ph.h3->tp_len; 2542 break; 2543 case TPACKET_V2: 2544 tp_len = ph.h2->tp_len; 2545 break; 2546 default: 2547 tp_len = ph.h1->tp_len; 2548 break; 2549 } 2550 if (unlikely(tp_len > size_max)) { 2551 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2552 return -EMSGSIZE; 2553 } 2554 2555 if (unlikely(po->tp_tx_has_off)) { 2556 int off_min, off_max; 2557 2558 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2559 off_max = po->tx_ring.frame_size - tp_len; 2560 if (po->sk.sk_type == SOCK_DGRAM) { 2561 switch (po->tp_version) { 2562 case TPACKET_V3: 2563 off = ph.h3->tp_net; 2564 break; 2565 case TPACKET_V2: 2566 off = ph.h2->tp_net; 2567 break; 2568 default: 2569 off = ph.h1->tp_net; 2570 break; 2571 } 2572 } else { 2573 switch (po->tp_version) { 2574 case TPACKET_V3: 2575 off = ph.h3->tp_mac; 2576 break; 2577 case TPACKET_V2: 2578 off = ph.h2->tp_mac; 2579 break; 2580 default: 2581 off = ph.h1->tp_mac; 2582 break; 2583 } 2584 } 2585 if (unlikely((off < off_min) || (off_max < off))) 2586 return -EINVAL; 2587 } else { 2588 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2589 } 2590 2591 *data = frame + off; 2592 return tp_len; 2593 } 2594 2595 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2596 { 2597 struct sk_buff *skb; 2598 struct net_device *dev; 2599 struct virtio_net_hdr *vnet_hdr = NULL; 2600 struct sockcm_cookie sockc; 2601 __be16 proto; 2602 int err, reserve = 0; 2603 void *ph; 2604 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2605 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2606 int tp_len, size_max; 2607 unsigned char *addr; 2608 void *data; 2609 int len_sum = 0; 2610 int status = TP_STATUS_AVAILABLE; 2611 int hlen, tlen, copylen = 0; 2612 2613 mutex_lock(&po->pg_vec_lock); 2614 2615 if (likely(saddr == NULL)) { 2616 dev = packet_cached_dev_get(po); 2617 proto = po->num; 2618 addr = NULL; 2619 } else { 2620 err = -EINVAL; 2621 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2622 goto out; 2623 if (msg->msg_namelen < (saddr->sll_halen 2624 + offsetof(struct sockaddr_ll, 2625 sll_addr))) 2626 goto out; 2627 proto = saddr->sll_protocol; 2628 addr = saddr->sll_addr; 2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2630 } 2631 2632 err = -ENXIO; 2633 if (unlikely(dev == NULL)) 2634 goto out; 2635 err = -ENETDOWN; 2636 if (unlikely(!(dev->flags & IFF_UP))) 2637 goto out_put; 2638 2639 sockcm_init(&sockc, &po->sk); 2640 if (msg->msg_controllen) { 2641 err = sock_cmsg_send(&po->sk, msg, &sockc); 2642 if (unlikely(err)) 2643 goto out_put; 2644 } 2645 2646 if (po->sk.sk_socket->type == SOCK_RAW) 2647 reserve = dev->hard_header_len; 2648 size_max = po->tx_ring.frame_size 2649 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2650 2651 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2652 size_max = dev->mtu + reserve + VLAN_HLEN; 2653 2654 do { 2655 ph = packet_current_frame(po, &po->tx_ring, 2656 TP_STATUS_SEND_REQUEST); 2657 if (unlikely(ph == NULL)) { 2658 if (need_wait && need_resched()) 2659 
schedule(); 2660 continue; 2661 } 2662 2663 skb = NULL; 2664 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2665 if (tp_len < 0) 2666 goto tpacket_error; 2667 2668 status = TP_STATUS_SEND_REQUEST; 2669 hlen = LL_RESERVED_SPACE(dev); 2670 tlen = dev->needed_tailroom; 2671 if (po->has_vnet_hdr) { 2672 vnet_hdr = data; 2673 data += sizeof(*vnet_hdr); 2674 tp_len -= sizeof(*vnet_hdr); 2675 if (tp_len < 0 || 2676 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2677 tp_len = -EINVAL; 2678 goto tpacket_error; 2679 } 2680 copylen = __virtio16_to_cpu(vio_le(), 2681 vnet_hdr->hdr_len); 2682 } 2683 copylen = max_t(int, copylen, dev->hard_header_len); 2684 skb = sock_alloc_send_skb(&po->sk, 2685 hlen + tlen + sizeof(struct sockaddr_ll) + 2686 (copylen - dev->hard_header_len), 2687 !need_wait, &err); 2688 2689 if (unlikely(skb == NULL)) { 2690 /* we assume the socket was initially writeable ... */ 2691 if (likely(len_sum > 0)) 2692 err = len_sum; 2693 goto out_status; 2694 } 2695 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2696 addr, hlen, copylen, &sockc); 2697 if (likely(tp_len >= 0) && 2698 tp_len > dev->mtu + reserve && 2699 !po->has_vnet_hdr && 2700 !packet_extra_vlan_len_allowed(dev, skb)) 2701 tp_len = -EMSGSIZE; 2702 2703 if (unlikely(tp_len < 0)) { 2704 tpacket_error: 2705 if (po->tp_loss) { 2706 __packet_set_status(po, ph, 2707 TP_STATUS_AVAILABLE); 2708 packet_increment_head(&po->tx_ring); 2709 kfree_skb(skb); 2710 continue; 2711 } else { 2712 status = TP_STATUS_WRONG_FORMAT; 2713 err = tp_len; 2714 goto out_status; 2715 } 2716 } 2717 2718 if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, 2719 vio_le())) { 2720 tp_len = -EINVAL; 2721 goto tpacket_error; 2722 } 2723 2724 skb->destructor = tpacket_destruct_skb; 2725 __packet_set_status(po, ph, TP_STATUS_SENDING); 2726 packet_inc_pending(&po->tx_ring); 2727 2728 status = TP_STATUS_SEND_REQUEST; 2729 err = po->xmit(skb); 2730 if (unlikely(err > 0)) { 2731 err = net_xmit_errno(err); 2732 if (err && __packet_get_status(po, ph) == 2733 TP_STATUS_AVAILABLE) { 2734 /* skb was destructed already */ 2735 skb = NULL; 2736 goto out_status; 2737 } 2738 /* 2739 * skb was dropped but not destructed yet; 2740 * let's treat it like congestion or err < 0 2741 */ 2742 err = 0; 2743 } 2744 packet_increment_head(&po->tx_ring); 2745 len_sum += tp_len; 2746 } while (likely((ph != NULL) || 2747 /* Note: packet_read_pending() might be slow if we have 2748 * to call it as it's per_cpu variable, but in fast-path 2749 * we already short-circuit the loop with the first 2750 * condition, and luckily don't have to go that path 2751 * anyway. 2752 */ 2753 (need_wait && packet_read_pending(&po->tx_ring)))); 2754 2755 err = len_sum; 2756 goto out_put; 2757 2758 out_status: 2759 __packet_set_status(po, ph, status); 2760 kfree_skb(skb); 2761 out_put: 2762 dev_put(dev); 2763 out: 2764 mutex_unlock(&po->pg_vec_lock); 2765 return err; 2766 } 2767 2768 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2769 size_t reserve, size_t len, 2770 size_t linear, int noblock, 2771 int *err) 2772 { 2773 struct sk_buff *skb; 2774 2775 /* Under a page? Don't bother with paged skb. 
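 * Two illustrative cases (sizes made up for the example): a 300 byte
 * frame with no vnet header hint satisfies the test below and is
 * allocated fully linear, while a 60 KB GSO send whose vnet_hdr.hdr_len
 * is 66 gets a 66 byte linear head and the rest as paged data.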
*/ 2776 if (prepad + len < PAGE_SIZE || !linear) 2777 linear = len; 2778 2779 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2780 err, 0); 2781 if (!skb) 2782 return NULL; 2783 2784 skb_reserve(skb, reserve); 2785 skb_put(skb, linear); 2786 skb->data_len = len - linear; 2787 skb->len += len - linear; 2788 2789 return skb; 2790 } 2791 2792 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2793 { 2794 struct sock *sk = sock->sk; 2795 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2796 struct sk_buff *skb; 2797 struct net_device *dev; 2798 __be16 proto; 2799 unsigned char *addr; 2800 int err, reserve = 0; 2801 struct sockcm_cookie sockc; 2802 struct virtio_net_hdr vnet_hdr = { 0 }; 2803 int offset = 0; 2804 struct packet_sock *po = pkt_sk(sk); 2805 bool has_vnet_hdr = false; 2806 int hlen, tlen, linear; 2807 int extra_len = 0; 2808 2809 /* 2810 * Get and verify the address. 2811 */ 2812 2813 if (likely(saddr == NULL)) { 2814 dev = packet_cached_dev_get(po); 2815 proto = po->num; 2816 addr = NULL; 2817 } else { 2818 err = -EINVAL; 2819 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2820 goto out; 2821 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2822 goto out; 2823 proto = saddr->sll_protocol; 2824 addr = saddr->sll_addr; 2825 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2826 } 2827 2828 err = -ENXIO; 2829 if (unlikely(dev == NULL)) 2830 goto out_unlock; 2831 err = -ENETDOWN; 2832 if (unlikely(!(dev->flags & IFF_UP))) 2833 goto out_unlock; 2834 2835 sockcm_init(&sockc, sk); 2836 sockc.mark = sk->sk_mark; 2837 if (msg->msg_controllen) { 2838 err = sock_cmsg_send(sk, msg, &sockc); 2839 if (unlikely(err)) 2840 goto out_unlock; 2841 } 2842 2843 if (sock->type == SOCK_RAW) 2844 reserve = dev->hard_header_len; 2845 if (po->has_vnet_hdr) { 2846 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2847 if (err) 2848 goto out_unlock; 2849 has_vnet_hdr = true; 2850 } 2851 2852 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2853 if (!netif_supports_nofcs(dev)) { 2854 err = -EPROTONOSUPPORT; 2855 goto out_unlock; 2856 } 2857 extra_len = 4; /* We're doing our own CRC */ 2858 } 2859 2860 err = -EMSGSIZE; 2861 if (!vnet_hdr.gso_type && 2862 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2863 goto out_unlock; 2864 2865 err = -ENOBUFS; 2866 hlen = LL_RESERVED_SPACE(dev); 2867 tlen = dev->needed_tailroom; 2868 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2869 linear = max(linear, min_t(int, len, dev->hard_header_len)); 2870 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2871 msg->msg_flags & MSG_DONTWAIT, &err); 2872 if (skb == NULL) 2873 goto out_unlock; 2874 2875 skb_reset_network_header(skb); 2876 2877 err = -EINVAL; 2878 if (sock->type == SOCK_DGRAM) { 2879 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2880 if (unlikely(offset < 0)) 2881 goto out_free; 2882 } else if (reserve) { 2883 skb_reserve(skb, -reserve); 2884 if (len < reserve) 2885 skb_reset_network_header(skb); 2886 } 2887 2888 /* Returns -EFAULT on error */ 2889 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 2890 if (err) 2891 goto out_free; 2892 2893 if (sock->type == SOCK_RAW && 2894 !dev_validate_header(dev, skb->data, len)) { 2895 err = -EINVAL; 2896 goto out_free; 2897 } 2898 2899 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); 2900 2901 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 2902 
!packet_extra_vlan_len_allowed(dev, skb)) { 2903 err = -EMSGSIZE; 2904 goto out_free; 2905 } 2906 2907 skb->protocol = proto; 2908 skb->dev = dev; 2909 skb->priority = sk->sk_priority; 2910 skb->mark = sockc.mark; 2911 skb->tstamp = sockc.transmit_time; 2912 2913 if (has_vnet_hdr) { 2914 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2915 if (err) 2916 goto out_free; 2917 len += sizeof(vnet_hdr); 2918 } 2919 2920 skb_probe_transport_header(skb, reserve); 2921 2922 if (unlikely(extra_len == 4)) 2923 skb->no_fcs = 1; 2924 2925 err = po->xmit(skb); 2926 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2927 goto out_unlock; 2928 2929 dev_put(dev); 2930 2931 return len; 2932 2933 out_free: 2934 kfree_skb(skb); 2935 out_unlock: 2936 if (dev) 2937 dev_put(dev); 2938 out: 2939 return err; 2940 } 2941 2942 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 2943 { 2944 struct sock *sk = sock->sk; 2945 struct packet_sock *po = pkt_sk(sk); 2946 2947 if (po->tx_ring.pg_vec) 2948 return tpacket_snd(po, msg); 2949 else 2950 return packet_snd(sock, msg, len); 2951 } 2952 2953 /* 2954 * Close a PACKET socket. This is fairly simple. We immediately go 2955 * to 'closed' state and remove our protocol entry in the device list. 2956 */ 2957 2958 static int packet_release(struct socket *sock) 2959 { 2960 struct sock *sk = sock->sk; 2961 struct packet_sock *po; 2962 struct packet_fanout *f; 2963 struct net *net; 2964 union tpacket_req_u req_u; 2965 2966 if (!sk) 2967 return 0; 2968 2969 net = sock_net(sk); 2970 po = pkt_sk(sk); 2971 2972 mutex_lock(&net->packet.sklist_lock); 2973 sk_del_node_init_rcu(sk); 2974 mutex_unlock(&net->packet.sklist_lock); 2975 2976 preempt_disable(); 2977 sock_prot_inuse_add(net, sk->sk_prot, -1); 2978 preempt_enable(); 2979 2980 spin_lock(&po->bind_lock); 2981 unregister_prot_hook(sk, false); 2982 packet_cached_dev_reset(po); 2983 2984 if (po->prot_hook.dev) { 2985 dev_put(po->prot_hook.dev); 2986 po->prot_hook.dev = NULL; 2987 } 2988 spin_unlock(&po->bind_lock); 2989 2990 packet_flush_mclist(sk); 2991 2992 lock_sock(sk); 2993 if (po->rx_ring.pg_vec) { 2994 memset(&req_u, 0, sizeof(req_u)); 2995 packet_set_ring(sk, &req_u, 1, 0); 2996 } 2997 2998 if (po->tx_ring.pg_vec) { 2999 memset(&req_u, 0, sizeof(req_u)); 3000 packet_set_ring(sk, &req_u, 1, 1); 3001 } 3002 release_sock(sk); 3003 3004 f = fanout_release(sk); 3005 3006 synchronize_net(); 3007 3008 if (f) { 3009 kfree(po->rollover); 3010 fanout_release_data(f); 3011 kfree(f); 3012 } 3013 /* 3014 * Now the socket is dead. No more input will appear. 3015 */ 3016 sock_orphan(sk); 3017 sock->sk = NULL; 3018 3019 /* Purge queues */ 3020 3021 skb_queue_purge(&sk->sk_receive_queue); 3022 packet_free_pending(po); 3023 sk_refcnt_debug_release(sk); 3024 3025 sock_put(sk); 3026 return 0; 3027 } 3028 3029 /* 3030 * Attach a packet hook. 
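 *
 * packet_do_bind() below backs both bind() flavours.  For AF_PACKET
 * proper a userspace caller typically does something like this (sketch,
 * interface name purely illustrative, error handling omitted):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * which re-hooks the socket onto that device/protocol pair.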
3031 */ 3032 3033 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3034 __be16 proto) 3035 { 3036 struct packet_sock *po = pkt_sk(sk); 3037 struct net_device *dev_curr; 3038 __be16 proto_curr; 3039 bool need_rehook; 3040 struct net_device *dev = NULL; 3041 int ret = 0; 3042 bool unlisted = false; 3043 3044 lock_sock(sk); 3045 spin_lock(&po->bind_lock); 3046 rcu_read_lock(); 3047 3048 if (po->fanout) { 3049 ret = -EINVAL; 3050 goto out_unlock; 3051 } 3052 3053 if (name) { 3054 dev = dev_get_by_name_rcu(sock_net(sk), name); 3055 if (!dev) { 3056 ret = -ENODEV; 3057 goto out_unlock; 3058 } 3059 } else if (ifindex) { 3060 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3061 if (!dev) { 3062 ret = -ENODEV; 3063 goto out_unlock; 3064 } 3065 } 3066 3067 if (dev) 3068 dev_hold(dev); 3069 3070 proto_curr = po->prot_hook.type; 3071 dev_curr = po->prot_hook.dev; 3072 3073 need_rehook = proto_curr != proto || dev_curr != dev; 3074 3075 if (need_rehook) { 3076 if (po->running) { 3077 rcu_read_unlock(); 3078 /* prevents packet_notifier() from calling 3079 * register_prot_hook() 3080 */ 3081 po->num = 0; 3082 __unregister_prot_hook(sk, true); 3083 rcu_read_lock(); 3084 dev_curr = po->prot_hook.dev; 3085 if (dev) 3086 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3087 dev->ifindex); 3088 } 3089 3090 BUG_ON(po->running); 3091 po->num = proto; 3092 po->prot_hook.type = proto; 3093 3094 if (unlikely(unlisted)) { 3095 dev_put(dev); 3096 po->prot_hook.dev = NULL; 3097 po->ifindex = -1; 3098 packet_cached_dev_reset(po); 3099 } else { 3100 po->prot_hook.dev = dev; 3101 po->ifindex = dev ? dev->ifindex : 0; 3102 packet_cached_dev_assign(po, dev); 3103 } 3104 } 3105 if (dev_curr) 3106 dev_put(dev_curr); 3107 3108 if (proto == 0 || !need_rehook) 3109 goto out_unlock; 3110 3111 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3112 register_prot_hook(sk); 3113 } else { 3114 sk->sk_err = ENETDOWN; 3115 if (!sock_flag(sk, SOCK_DEAD)) 3116 sk->sk_error_report(sk); 3117 } 3118 3119 out_unlock: 3120 rcu_read_unlock(); 3121 spin_unlock(&po->bind_lock); 3122 release_sock(sk); 3123 return ret; 3124 } 3125 3126 /* 3127 * Bind a packet socket to a device 3128 */ 3129 3130 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3131 int addr_len) 3132 { 3133 struct sock *sk = sock->sk; 3134 char name[sizeof(uaddr->sa_data) + 1]; 3135 3136 /* 3137 * Check legality 3138 */ 3139 3140 if (addr_len != sizeof(struct sockaddr)) 3141 return -EINVAL; 3142 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3143 * zero-terminated. 3144 */ 3145 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); 3146 name[sizeof(uaddr->sa_data)] = 0; 3147 3148 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3149 } 3150 3151 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3152 { 3153 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3154 struct sock *sk = sock->sk; 3155 3156 /* 3157 * Check legality 3158 */ 3159 3160 if (addr_len < sizeof(struct sockaddr_ll)) 3161 return -EINVAL; 3162 if (sll->sll_family != AF_PACKET) 3163 return -EINVAL; 3164 3165 return packet_do_bind(sk, NULL, sll->sll_ifindex, 3166 sll->sll_protocol ? : pkt_sk(sk)->num); 3167 } 3168 3169 static struct proto packet_proto = { 3170 .name = "PACKET", 3171 .owner = THIS_MODULE, 3172 .obj_size = sizeof(struct packet_sock), 3173 }; 3174 3175 /* 3176 * Create a packet of type SOCK_PACKET. 
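 * This also backs SOCK_RAW and SOCK_DGRAM packet sockets, and creation
 * requires CAP_NET_RAW in the owning user namespace.  Typical userspace
 * calls look like (sketch only):
 *
 *	int fd  = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// or, for cooked frames without the link-level header:
 *	int dfd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));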
3177 */ 3178 3179 static int packet_create(struct net *net, struct socket *sock, int protocol, 3180 int kern) 3181 { 3182 struct sock *sk; 3183 struct packet_sock *po; 3184 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3185 int err; 3186 3187 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3188 return -EPERM; 3189 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3190 sock->type != SOCK_PACKET) 3191 return -ESOCKTNOSUPPORT; 3192 3193 sock->state = SS_UNCONNECTED; 3194 3195 err = -ENOBUFS; 3196 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3197 if (sk == NULL) 3198 goto out; 3199 3200 sock->ops = &packet_ops; 3201 if (sock->type == SOCK_PACKET) 3202 sock->ops = &packet_ops_spkt; 3203 3204 sock_init_data(sock, sk); 3205 3206 po = pkt_sk(sk); 3207 sk->sk_family = PF_PACKET; 3208 po->num = proto; 3209 po->xmit = dev_queue_xmit; 3210 3211 err = packet_alloc_pending(po); 3212 if (err) 3213 goto out2; 3214 3215 packet_cached_dev_reset(po); 3216 3217 sk->sk_destruct = packet_sock_destruct; 3218 sk_refcnt_debug_inc(sk); 3219 3220 /* 3221 * Attach a protocol block 3222 */ 3223 3224 spin_lock_init(&po->bind_lock); 3225 mutex_init(&po->pg_vec_lock); 3226 po->rollover = NULL; 3227 po->prot_hook.func = packet_rcv; 3228 3229 if (sock->type == SOCK_PACKET) 3230 po->prot_hook.func = packet_rcv_spkt; 3231 3232 po->prot_hook.af_packet_priv = sk; 3233 3234 if (proto) { 3235 po->prot_hook.type = proto; 3236 __register_prot_hook(sk); 3237 } 3238 3239 mutex_lock(&net->packet.sklist_lock); 3240 sk_add_node_rcu(sk, &net->packet.sklist); 3241 mutex_unlock(&net->packet.sklist_lock); 3242 3243 preempt_disable(); 3244 sock_prot_inuse_add(net, &packet_proto, 1); 3245 preempt_enable(); 3246 3247 return 0; 3248 out2: 3249 sk_free(sk); 3250 out: 3251 return err; 3252 } 3253 3254 /* 3255 * Pull a packet from our receive queue and hand it to the user. 3256 * If necessary we block. 3257 */ 3258 3259 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3260 int flags) 3261 { 3262 struct sock *sk = sock->sk; 3263 struct sk_buff *skb; 3264 int copied, err; 3265 int vnet_hdr_len = 0; 3266 unsigned int origlen = 0; 3267 3268 err = -EINVAL; 3269 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3270 goto out; 3271 3272 #if 0 3273 /* What error should we return now? EUNATTACH? */ 3274 if (pkt_sk(sk)->ifindex < 0) 3275 return -ENODEV; 3276 #endif 3277 3278 if (flags & MSG_ERRQUEUE) { 3279 err = sock_recv_errqueue(sk, msg, len, 3280 SOL_PACKET, PACKET_TX_TIMESTAMP); 3281 goto out; 3282 } 3283 3284 /* 3285 * Call the generic datagram receiver. This handles all sorts 3286 * of horrible races and re-entrancy so we can forget about it 3287 * in the protocol layers. 3288 * 3289 * Now it will return ENETDOWN, if device have just gone down, 3290 * but then it will block. 3291 */ 3292 3293 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3294 3295 /* 3296 * An error occurred so return it. Because skb_recv_datagram() 3297 * handles the blocking we don't see and worry about blocking 3298 * retries. 3299 */ 3300 3301 if (skb == NULL) 3302 goto out; 3303 3304 if (pkt_sk(sk)->pressure) 3305 packet_rcv_has_room(pkt_sk(sk), NULL); 3306 3307 if (pkt_sk(sk)->has_vnet_hdr) { 3308 err = packet_rcv_vnet(msg, skb, &len); 3309 if (err) 3310 goto out_free; 3311 vnet_hdr_len = sizeof(struct virtio_net_hdr); 3312 } 3313 3314 /* You lose any data beyond the buffer you gave. 
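 * Passing MSG_TRUNC in flags makes the call return the full on-wire
 * length even though only the copied bytes were stored, e.g. (sketch):
 *
 *	ssize_t wire_len = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *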
If it worries 3315 * a user program they can ask the device for its MTU 3316 * anyway. 3317 */ 3318 copied = skb->len; 3319 if (copied > len) { 3320 copied = len; 3321 msg->msg_flags |= MSG_TRUNC; 3322 } 3323 3324 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3325 if (err) 3326 goto out_free; 3327 3328 if (sock->type != SOCK_PACKET) { 3329 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3330 3331 /* Original length was stored in sockaddr_ll fields */ 3332 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3333 sll->sll_family = AF_PACKET; 3334 sll->sll_protocol = skb->protocol; 3335 } 3336 3337 sock_recv_ts_and_drops(msg, sk, skb); 3338 3339 if (msg->msg_name) { 3340 /* If the address length field is there to be filled 3341 * in, we fill it in now. 3342 */ 3343 if (sock->type == SOCK_PACKET) { 3344 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3345 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3346 } else { 3347 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3348 3349 msg->msg_namelen = sll->sll_halen + 3350 offsetof(struct sockaddr_ll, sll_addr); 3351 } 3352 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 3353 msg->msg_namelen); 3354 } 3355 3356 if (pkt_sk(sk)->auxdata) { 3357 struct tpacket_auxdata aux; 3358 3359 aux.tp_status = TP_STATUS_USER; 3360 if (skb->ip_summed == CHECKSUM_PARTIAL) 3361 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3362 else if (skb->pkt_type != PACKET_OUTGOING && 3363 (skb->ip_summed == CHECKSUM_COMPLETE || 3364 skb_csum_unnecessary(skb))) 3365 aux.tp_status |= TP_STATUS_CSUM_VALID; 3366 3367 aux.tp_len = origlen; 3368 aux.tp_snaplen = skb->len; 3369 aux.tp_mac = 0; 3370 aux.tp_net = skb_network_offset(skb); 3371 if (skb_vlan_tag_present(skb)) { 3372 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3373 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3374 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3375 } else { 3376 aux.tp_vlan_tci = 0; 3377 aux.tp_vlan_tpid = 0; 3378 } 3379 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3380 } 3381 3382 /* 3383 * Free or return the buffer as appropriate. Again this 3384 * hides all the races and re-entrancy issues from us. 3385 */ 3386 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3387 3388 out_free: 3389 skb_free_datagram(sk, skb); 3390 out: 3391 return err; 3392 } 3393 3394 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3395 int peer) 3396 { 3397 struct net_device *dev; 3398 struct sock *sk = sock->sk; 3399 3400 if (peer) 3401 return -EOPNOTSUPP; 3402 3403 uaddr->sa_family = AF_PACKET; 3404 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3405 rcu_read_lock(); 3406 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3407 if (dev) 3408 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3409 rcu_read_unlock(); 3410 3411 return sizeof(*uaddr); 3412 } 3413 3414 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3415 int peer) 3416 { 3417 struct net_device *dev; 3418 struct sock *sk = sock->sk; 3419 struct packet_sock *po = pkt_sk(sk); 3420 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3421 3422 if (peer) 3423 return -EOPNOTSUPP; 3424 3425 sll->sll_family = AF_PACKET; 3426 sll->sll_ifindex = po->ifindex; 3427 sll->sll_protocol = po->num; 3428 sll->sll_pkttype = 0; 3429 rcu_read_lock(); 3430 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3431 if (dev) { 3432 sll->sll_hatype = dev->type; 3433 sll->sll_halen = dev->addr_len; 3434 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3435 } else { 3436 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3437 sll->sll_halen = 0; 3438 } 3439 rcu_read_unlock(); 3440 3441 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3442 } 3443 3444 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3445 int what) 3446 { 3447 switch (i->type) { 3448 case PACKET_MR_MULTICAST: 3449 if (i->alen != dev->addr_len) 3450 return -EINVAL; 3451 if (what > 0) 3452 return dev_mc_add(dev, i->addr); 3453 else 3454 return dev_mc_del(dev, i->addr); 3455 break; 3456 case PACKET_MR_PROMISC: 3457 return dev_set_promiscuity(dev, what); 3458 case PACKET_MR_ALLMULTI: 3459 return dev_set_allmulti(dev, what); 3460 case PACKET_MR_UNICAST: 3461 if (i->alen != dev->addr_len) 3462 return -EINVAL; 3463 if (what > 0) 3464 return dev_uc_add(dev, i->addr); 3465 else 3466 return dev_uc_del(dev, i->addr); 3467 break; 3468 default: 3469 break; 3470 } 3471 return 0; 3472 } 3473 3474 static void packet_dev_mclist_delete(struct net_device *dev, 3475 struct packet_mclist **mlp) 3476 { 3477 struct packet_mclist *ml; 3478 3479 while ((ml = *mlp) != NULL) { 3480 if (ml->ifindex == dev->ifindex) { 3481 packet_dev_mc(dev, ml, -1); 3482 *mlp = ml->next; 3483 kfree(ml); 3484 } else 3485 mlp = &ml->next; 3486 } 3487 } 3488 3489 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3490 { 3491 struct packet_sock *po = pkt_sk(sk); 3492 struct packet_mclist *ml, *i; 3493 struct net_device *dev; 3494 int err; 3495 3496 rtnl_lock(); 3497 3498 err = -ENODEV; 3499 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3500 if (!dev) 3501 goto done; 3502 3503 err = -EINVAL; 3504 if (mreq->mr_alen > dev->addr_len) 3505 goto done; 3506 3507 err = -ENOBUFS; 3508 i = kmalloc(sizeof(*i), GFP_KERNEL); 3509 if (i == NULL) 3510 goto done; 3511 3512 err = 0; 3513 for (ml = po->mclist; ml; ml = ml->next) { 3514 if (ml->ifindex == mreq->mr_ifindex && 3515 ml->type == mreq->mr_type && 3516 ml->alen == mreq->mr_alen && 3517 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3518 ml->count++; 3519 /* Free the new element ... 
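 * because an identical entry already exists and only its refcount was
 * bumped.  Such entries come from setsockopt() calls roughly like this
 * (sketch, interface name illustrative):
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));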
*/ 3520 kfree(i); 3521 goto done; 3522 } 3523 } 3524 3525 i->type = mreq->mr_type; 3526 i->ifindex = mreq->mr_ifindex; 3527 i->alen = mreq->mr_alen; 3528 memcpy(i->addr, mreq->mr_address, i->alen); 3529 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3530 i->count = 1; 3531 i->next = po->mclist; 3532 po->mclist = i; 3533 err = packet_dev_mc(dev, i, 1); 3534 if (err) { 3535 po->mclist = i->next; 3536 kfree(i); 3537 } 3538 3539 done: 3540 rtnl_unlock(); 3541 return err; 3542 } 3543 3544 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3545 { 3546 struct packet_mclist *ml, **mlp; 3547 3548 rtnl_lock(); 3549 3550 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3551 if (ml->ifindex == mreq->mr_ifindex && 3552 ml->type == mreq->mr_type && 3553 ml->alen == mreq->mr_alen && 3554 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3555 if (--ml->count == 0) { 3556 struct net_device *dev; 3557 *mlp = ml->next; 3558 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3559 if (dev) 3560 packet_dev_mc(dev, ml, -1); 3561 kfree(ml); 3562 } 3563 break; 3564 } 3565 } 3566 rtnl_unlock(); 3567 return 0; 3568 } 3569 3570 static void packet_flush_mclist(struct sock *sk) 3571 { 3572 struct packet_sock *po = pkt_sk(sk); 3573 struct packet_mclist *ml; 3574 3575 if (!po->mclist) 3576 return; 3577 3578 rtnl_lock(); 3579 while ((ml = po->mclist) != NULL) { 3580 struct net_device *dev; 3581 3582 po->mclist = ml->next; 3583 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3584 if (dev != NULL) 3585 packet_dev_mc(dev, ml, -1); 3586 kfree(ml); 3587 } 3588 rtnl_unlock(); 3589 } 3590 3591 static int 3592 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 3593 { 3594 struct sock *sk = sock->sk; 3595 struct packet_sock *po = pkt_sk(sk); 3596 int ret; 3597 3598 if (level != SOL_PACKET) 3599 return -ENOPROTOOPT; 3600 3601 switch (optname) { 3602 case PACKET_ADD_MEMBERSHIP: 3603 case PACKET_DROP_MEMBERSHIP: 3604 { 3605 struct packet_mreq_max mreq; 3606 int len = optlen; 3607 memset(&mreq, 0, sizeof(mreq)); 3608 if (len < sizeof(struct packet_mreq)) 3609 return -EINVAL; 3610 if (len > sizeof(mreq)) 3611 len = sizeof(mreq); 3612 if (copy_from_user(&mreq, optval, len)) 3613 return -EFAULT; 3614 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3615 return -EINVAL; 3616 if (optname == PACKET_ADD_MEMBERSHIP) 3617 ret = packet_mc_add(sk, &mreq); 3618 else 3619 ret = packet_mc_drop(sk, &mreq); 3620 return ret; 3621 } 3622 3623 case PACKET_RX_RING: 3624 case PACKET_TX_RING: 3625 { 3626 union tpacket_req_u req_u; 3627 int len; 3628 3629 lock_sock(sk); 3630 switch (po->tp_version) { 3631 case TPACKET_V1: 3632 case TPACKET_V2: 3633 len = sizeof(req_u.req); 3634 break; 3635 case TPACKET_V3: 3636 default: 3637 len = sizeof(req_u.req3); 3638 break; 3639 } 3640 if (optlen < len) { 3641 ret = -EINVAL; 3642 } else { 3643 if (copy_from_user(&req_u.req, optval, len)) 3644 ret = -EFAULT; 3645 else 3646 ret = packet_set_ring(sk, &req_u, 0, 3647 optname == PACKET_TX_RING); 3648 } 3649 release_sock(sk); 3650 return ret; 3651 } 3652 case PACKET_COPY_THRESH: 3653 { 3654 int val; 3655 3656 if (optlen != sizeof(val)) 3657 return -EINVAL; 3658 if (copy_from_user(&val, optval, sizeof(val))) 3659 return -EFAULT; 3660 3661 pkt_sk(sk)->copy_thresh = val; 3662 return 0; 3663 } 3664 case PACKET_VERSION: 3665 { 3666 int val; 3667 3668 if (optlen != sizeof(val)) 3669 return -EINVAL; 3670 if (copy_from_user(&val, optval, 
sizeof(val))) 3671 return -EFAULT; 3672 switch (val) { 3673 case TPACKET_V1: 3674 case TPACKET_V2: 3675 case TPACKET_V3: 3676 break; 3677 default: 3678 return -EINVAL; 3679 } 3680 lock_sock(sk); 3681 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3682 ret = -EBUSY; 3683 } else { 3684 po->tp_version = val; 3685 ret = 0; 3686 } 3687 release_sock(sk); 3688 return ret; 3689 } 3690 case PACKET_RESERVE: 3691 { 3692 unsigned int val; 3693 3694 if (optlen != sizeof(val)) 3695 return -EINVAL; 3696 if (copy_from_user(&val, optval, sizeof(val))) 3697 return -EFAULT; 3698 if (val > INT_MAX) 3699 return -EINVAL; 3700 lock_sock(sk); 3701 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3702 ret = -EBUSY; 3703 } else { 3704 po->tp_reserve = val; 3705 ret = 0; 3706 } 3707 release_sock(sk); 3708 return ret; 3709 } 3710 case PACKET_LOSS: 3711 { 3712 unsigned int val; 3713 3714 if (optlen != sizeof(val)) 3715 return -EINVAL; 3716 if (copy_from_user(&val, optval, sizeof(val))) 3717 return -EFAULT; 3718 3719 lock_sock(sk); 3720 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3721 ret = -EBUSY; 3722 } else { 3723 po->tp_loss = !!val; 3724 ret = 0; 3725 } 3726 release_sock(sk); 3727 return ret; 3728 } 3729 case PACKET_AUXDATA: 3730 { 3731 int val; 3732 3733 if (optlen < sizeof(val)) 3734 return -EINVAL; 3735 if (copy_from_user(&val, optval, sizeof(val))) 3736 return -EFAULT; 3737 3738 lock_sock(sk); 3739 po->auxdata = !!val; 3740 release_sock(sk); 3741 return 0; 3742 } 3743 case PACKET_ORIGDEV: 3744 { 3745 int val; 3746 3747 if (optlen < sizeof(val)) 3748 return -EINVAL; 3749 if (copy_from_user(&val, optval, sizeof(val))) 3750 return -EFAULT; 3751 3752 lock_sock(sk); 3753 po->origdev = !!val; 3754 release_sock(sk); 3755 return 0; 3756 } 3757 case PACKET_VNET_HDR: 3758 { 3759 int val; 3760 3761 if (sock->type != SOCK_RAW) 3762 return -EINVAL; 3763 if (optlen < sizeof(val)) 3764 return -EINVAL; 3765 if (copy_from_user(&val, optval, sizeof(val))) 3766 return -EFAULT; 3767 3768 lock_sock(sk); 3769 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3770 ret = -EBUSY; 3771 } else { 3772 po->has_vnet_hdr = !!val; 3773 ret = 0; 3774 } 3775 release_sock(sk); 3776 return ret; 3777 } 3778 case PACKET_TIMESTAMP: 3779 { 3780 int val; 3781 3782 if (optlen != sizeof(val)) 3783 return -EINVAL; 3784 if (copy_from_user(&val, optval, sizeof(val))) 3785 return -EFAULT; 3786 3787 po->tp_tstamp = val; 3788 return 0; 3789 } 3790 case PACKET_FANOUT: 3791 { 3792 int val; 3793 3794 if (optlen != sizeof(val)) 3795 return -EINVAL; 3796 if (copy_from_user(&val, optval, sizeof(val))) 3797 return -EFAULT; 3798 3799 return fanout_add(sk, val & 0xffff, val >> 16); 3800 } 3801 case PACKET_FANOUT_DATA: 3802 { 3803 if (!po->fanout) 3804 return -EINVAL; 3805 3806 return fanout_set_data(po, optval, optlen); 3807 } 3808 case PACKET_TX_HAS_OFF: 3809 { 3810 unsigned int val; 3811 3812 if (optlen != sizeof(val)) 3813 return -EINVAL; 3814 if (copy_from_user(&val, optval, sizeof(val))) 3815 return -EFAULT; 3816 3817 lock_sock(sk); 3818 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3819 ret = -EBUSY; 3820 } else { 3821 po->tp_tx_has_off = !!val; 3822 ret = 0; 3823 } 3824 release_sock(sk); 3825 return 0; 3826 } 3827 case PACKET_QDISC_BYPASS: 3828 { 3829 int val; 3830 3831 if (optlen != sizeof(val)) 3832 return -EINVAL; 3833 if (copy_from_user(&val, optval, sizeof(val))) 3834 return -EFAULT; 3835 3836 po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; 3837 return 0; 3838 } 3839 default: 3840 return -ENOPROTOOPT; 3841 } 3842 } 3843 3844 static int packet_getsockopt(struct socket *sock, int level, int optname, 3845 char __user *optval, int __user *optlen) 3846 { 3847 int len; 3848 int val, lv = sizeof(val); 3849 struct sock *sk = sock->sk; 3850 struct packet_sock *po = pkt_sk(sk); 3851 void *data = &val; 3852 union tpacket_stats_u st; 3853 struct tpacket_rollover_stats rstats; 3854 3855 if (level != SOL_PACKET) 3856 return -ENOPROTOOPT; 3857 3858 if (get_user(len, optlen)) 3859 return -EFAULT; 3860 3861 if (len < 0) 3862 return -EINVAL; 3863 3864 switch (optname) { 3865 case PACKET_STATISTICS: 3866 spin_lock_bh(&sk->sk_receive_queue.lock); 3867 memcpy(&st, &po->stats, sizeof(st)); 3868 memset(&po->stats, 0, sizeof(po->stats)); 3869 spin_unlock_bh(&sk->sk_receive_queue.lock); 3870 3871 if (po->tp_version == TPACKET_V3) { 3872 lv = sizeof(struct tpacket_stats_v3); 3873 st.stats3.tp_packets += st.stats3.tp_drops; 3874 data = &st.stats3; 3875 } else { 3876 lv = sizeof(struct tpacket_stats); 3877 st.stats1.tp_packets += st.stats1.tp_drops; 3878 data = &st.stats1; 3879 } 3880 3881 break; 3882 case PACKET_AUXDATA: 3883 val = po->auxdata; 3884 break; 3885 case PACKET_ORIGDEV: 3886 val = po->origdev; 3887 break; 3888 case PACKET_VNET_HDR: 3889 val = po->has_vnet_hdr; 3890 break; 3891 case PACKET_VERSION: 3892 val = po->tp_version; 3893 break; 3894 case PACKET_HDRLEN: 3895 if (len > sizeof(int)) 3896 len = sizeof(int); 3897 if (len < sizeof(int)) 3898 return -EINVAL; 3899 if (copy_from_user(&val, optval, len)) 3900 return -EFAULT; 3901 switch (val) { 3902 case TPACKET_V1: 3903 val = sizeof(struct tpacket_hdr); 3904 break; 3905 case TPACKET_V2: 3906 val = sizeof(struct tpacket2_hdr); 3907 break; 3908 case TPACKET_V3: 3909 val = sizeof(struct tpacket3_hdr); 3910 break; 3911 default: 3912 return -EINVAL; 3913 } 3914 break; 3915 case PACKET_RESERVE: 3916 val = po->tp_reserve; 3917 break; 3918 case PACKET_LOSS: 3919 val = po->tp_loss; 3920 break; 3921 case PACKET_TIMESTAMP: 3922 val = po->tp_tstamp; 3923 break; 3924 case PACKET_FANOUT: 3925 val = (po->fanout ? 
3926 ((u32)po->fanout->id | 3927 ((u32)po->fanout->type << 16) | 3928 ((u32)po->fanout->flags << 24)) : 3929 0); 3930 break; 3931 case PACKET_ROLLOVER_STATS: 3932 if (!po->rollover) 3933 return -EINVAL; 3934 rstats.tp_all = atomic_long_read(&po->rollover->num); 3935 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 3936 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 3937 data = &rstats; 3938 lv = sizeof(rstats); 3939 break; 3940 case PACKET_TX_HAS_OFF: 3941 val = po->tp_tx_has_off; 3942 break; 3943 case PACKET_QDISC_BYPASS: 3944 val = packet_use_direct_xmit(po); 3945 break; 3946 default: 3947 return -ENOPROTOOPT; 3948 } 3949 3950 if (len > lv) 3951 len = lv; 3952 if (put_user(len, optlen)) 3953 return -EFAULT; 3954 if (copy_to_user(optval, data, len)) 3955 return -EFAULT; 3956 return 0; 3957 } 3958 3959 3960 #ifdef CONFIG_COMPAT 3961 static int compat_packet_setsockopt(struct socket *sock, int level, int optname, 3962 char __user *optval, unsigned int optlen) 3963 { 3964 struct packet_sock *po = pkt_sk(sock->sk); 3965 3966 if (level != SOL_PACKET) 3967 return -ENOPROTOOPT; 3968 3969 if (optname == PACKET_FANOUT_DATA && 3970 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { 3971 optval = (char __user *)get_compat_bpf_fprog(optval); 3972 if (!optval) 3973 return -EFAULT; 3974 optlen = sizeof(struct sock_fprog); 3975 } 3976 3977 return packet_setsockopt(sock, level, optname, optval, optlen); 3978 } 3979 #endif 3980 3981 static int packet_notifier(struct notifier_block *this, 3982 unsigned long msg, void *ptr) 3983 { 3984 struct sock *sk; 3985 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3986 struct net *net = dev_net(dev); 3987 3988 rcu_read_lock(); 3989 sk_for_each_rcu(sk, &net->packet.sklist) { 3990 struct packet_sock *po = pkt_sk(sk); 3991 3992 switch (msg) { 3993 case NETDEV_UNREGISTER: 3994 if (po->mclist) 3995 packet_dev_mclist_delete(dev, &po->mclist); 3996 /* fallthrough */ 3997 3998 case NETDEV_DOWN: 3999 if (dev->ifindex == po->ifindex) { 4000 spin_lock(&po->bind_lock); 4001 if (po->running) { 4002 __unregister_prot_hook(sk, false); 4003 sk->sk_err = ENETDOWN; 4004 if (!sock_flag(sk, SOCK_DEAD)) 4005 sk->sk_error_report(sk); 4006 } 4007 if (msg == NETDEV_UNREGISTER) { 4008 packet_cached_dev_reset(po); 4009 po->ifindex = -1; 4010 if (po->prot_hook.dev) 4011 dev_put(po->prot_hook.dev); 4012 po->prot_hook.dev = NULL; 4013 } 4014 spin_unlock(&po->bind_lock); 4015 } 4016 break; 4017 case NETDEV_UP: 4018 if (dev->ifindex == po->ifindex) { 4019 spin_lock(&po->bind_lock); 4020 if (po->num) 4021 register_prot_hook(sk); 4022 spin_unlock(&po->bind_lock); 4023 } 4024 break; 4025 } 4026 } 4027 rcu_read_unlock(); 4028 return NOTIFY_DONE; 4029 } 4030 4031 4032 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4033 unsigned long arg) 4034 { 4035 struct sock *sk = sock->sk; 4036 4037 switch (cmd) { 4038 case SIOCOUTQ: 4039 { 4040 int amount = sk_wmem_alloc_get(sk); 4041 4042 return put_user(amount, (int __user *)arg); 4043 } 4044 case SIOCINQ: 4045 { 4046 struct sk_buff *skb; 4047 int amount = 0; 4048 4049 spin_lock_bh(&sk->sk_receive_queue.lock); 4050 skb = skb_peek(&sk->sk_receive_queue); 4051 if (skb) 4052 amount = skb->len; 4053 spin_unlock_bh(&sk->sk_receive_queue.lock); 4054 return put_user(amount, (int __user *)arg); 4055 } 4056 case SIOCGSTAMP: 4057 return sock_get_timestamp(sk, (struct timeval __user *)arg); 4058 case SIOCGSTAMPNS: 4059 return sock_get_timestampns(sk, (struct timespec __user *)arg); 4060 4061 #ifdef CONFIG_INET 
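	/* These ifconfig-style ioctls are simply forwarded to the IPv4
	 * datagram implementation, so e.g. ioctl(fd, SIOCGIFADDR, &ifr)
	 * (sketch; ifr is a struct ifreq naming the interface) still
	 * works on a packet socket when CONFIG_INET is enabled.
	 */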
4062 case SIOCADDRT: 4063 case SIOCDELRT: 4064 case SIOCDARP: 4065 case SIOCGARP: 4066 case SIOCSARP: 4067 case SIOCGIFADDR: 4068 case SIOCSIFADDR: 4069 case SIOCGIFBRDADDR: 4070 case SIOCSIFBRDADDR: 4071 case SIOCGIFNETMASK: 4072 case SIOCSIFNETMASK: 4073 case SIOCGIFDSTADDR: 4074 case SIOCSIFDSTADDR: 4075 case SIOCSIFFLAGS: 4076 return inet_dgram_ops.ioctl(sock, cmd, arg); 4077 #endif 4078 4079 default: 4080 return -ENOIOCTLCMD; 4081 } 4082 return 0; 4083 } 4084 4085 static __poll_t packet_poll(struct file *file, struct socket *sock, 4086 poll_table *wait) 4087 { 4088 struct sock *sk = sock->sk; 4089 struct packet_sock *po = pkt_sk(sk); 4090 __poll_t mask = datagram_poll(file, sock, wait); 4091 4092 spin_lock_bh(&sk->sk_receive_queue.lock); 4093 if (po->rx_ring.pg_vec) { 4094 if (!packet_previous_rx_frame(po, &po->rx_ring, 4095 TP_STATUS_KERNEL)) 4096 mask |= EPOLLIN | EPOLLRDNORM; 4097 } 4098 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 4099 po->pressure = 0; 4100 spin_unlock_bh(&sk->sk_receive_queue.lock); 4101 spin_lock_bh(&sk->sk_write_queue.lock); 4102 if (po->tx_ring.pg_vec) { 4103 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 4104 mask |= EPOLLOUT | EPOLLWRNORM; 4105 } 4106 spin_unlock_bh(&sk->sk_write_queue.lock); 4107 return mask; 4108 } 4109 4110 4111 /* Dirty? Well, I still did not learn better way to account 4112 * for user mmaps. 4113 */ 4114 4115 static void packet_mm_open(struct vm_area_struct *vma) 4116 { 4117 struct file *file = vma->vm_file; 4118 struct socket *sock = file->private_data; 4119 struct sock *sk = sock->sk; 4120 4121 if (sk) 4122 atomic_inc(&pkt_sk(sk)->mapped); 4123 } 4124 4125 static void packet_mm_close(struct vm_area_struct *vma) 4126 { 4127 struct file *file = vma->vm_file; 4128 struct socket *sock = file->private_data; 4129 struct sock *sk = sock->sk; 4130 4131 if (sk) 4132 atomic_dec(&pkt_sk(sk)->mapped); 4133 } 4134 4135 static const struct vm_operations_struct packet_mmap_ops = { 4136 .open = packet_mm_open, 4137 .close = packet_mm_close, 4138 }; 4139 4140 static void free_pg_vec(struct pgv *pg_vec, unsigned int len) 4141 { 4142 int i; 4143 4144 for (i = 0; i < len; i++) { 4145 if (likely(pg_vec[i].buffer)) { 4146 kvfree(pg_vec[i].buffer); 4147 pg_vec[i].buffer = NULL; 4148 } 4149 } 4150 kfree(pg_vec); 4151 } 4152 4153 static char *alloc_one_pg_vec_page(unsigned long size) 4154 { 4155 char *buffer; 4156 4157 buffer = kvzalloc(size, GFP_KERNEL); 4158 if (buffer) 4159 return buffer; 4160 4161 buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 4162 4163 return buffer; 4164 } 4165 4166 static struct pgv *alloc_pg_vec(struct tpacket_req *req) 4167 { 4168 unsigned int block_nr = req->tp_block_nr; 4169 unsigned long size = req->tp_block_size; 4170 struct pgv *pg_vec; 4171 int i; 4172 4173 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); 4174 if (unlikely(!pg_vec)) 4175 goto out; 4176 4177 for (i = 0; i < block_nr; i++) { 4178 pg_vec[i].buffer = alloc_one_pg_vec_page(size); 4179 if (unlikely(!pg_vec[i].buffer)) 4180 goto out_free_pgvec; 4181 } 4182 4183 out: 4184 return pg_vec; 4185 4186 out_free_pgvec: 4187 free_pg_vec(pg_vec, block_nr); 4188 pg_vec = NULL; 4189 goto out; 4190 } 4191 4192 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4193 int closing, int tx_ring) 4194 { 4195 struct pgv *pg_vec = NULL; 4196 struct packet_sock *po = pkt_sk(sk); 4197 struct packet_ring_buffer *rb; 4198 struct sk_buff_head *rb_queue; 4199 int was_running; 4200 __be16 num; 4201 int err = 
-EINVAL; 4202 /* Added to avoid minimal code churn */ 4203 struct tpacket_req *req = &req_u->req; 4204 4205 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4206 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4207 4208 err = -EBUSY; 4209 if (!closing) { 4210 if (atomic_read(&po->mapped)) 4211 goto out; 4212 if (packet_read_pending(rb)) 4213 goto out; 4214 } 4215 4216 if (req->tp_block_nr) { 4217 unsigned int min_frame_size; 4218 4219 /* Sanity tests and some calculations */ 4220 err = -EBUSY; 4221 if (unlikely(rb->pg_vec)) 4222 goto out; 4223 4224 switch (po->tp_version) { 4225 case TPACKET_V1: 4226 po->tp_hdrlen = TPACKET_HDRLEN; 4227 break; 4228 case TPACKET_V2: 4229 po->tp_hdrlen = TPACKET2_HDRLEN; 4230 break; 4231 case TPACKET_V3: 4232 po->tp_hdrlen = TPACKET3_HDRLEN; 4233 break; 4234 } 4235 4236 err = -EINVAL; 4237 if (unlikely((int)req->tp_block_size <= 0)) 4238 goto out; 4239 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4240 goto out; 4241 min_frame_size = po->tp_hdrlen + po->tp_reserve; 4242 if (po->tp_version >= TPACKET_V3 && 4243 req->tp_block_size < 4244 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) 4245 goto out; 4246 if (unlikely(req->tp_frame_size < min_frame_size)) 4247 goto out; 4248 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4249 goto out; 4250 4251 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4252 if (unlikely(rb->frames_per_block == 0)) 4253 goto out; 4254 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) 4255 goto out; 4256 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4257 req->tp_frame_nr)) 4258 goto out; 4259 4260 err = -ENOMEM; 4261 pg_vec = alloc_pg_vec(req); 4262 if (unlikely(!pg_vec)) 4263 goto out; 4264 switch (po->tp_version) { 4265 case TPACKET_V3: 4266 /* Block transmit is not supported yet */ 4267 if (!tx_ring) { 4268 init_prb_bdqc(po, rb, pg_vec, req_u); 4269 } else { 4270 struct tpacket_req3 *req3 = &req_u->req3; 4271 4272 if (req3->tp_retire_blk_tov || 4273 req3->tp_sizeof_priv || 4274 req3->tp_feature_req_word) { 4275 err = -EINVAL; 4276 goto out; 4277 } 4278 } 4279 break; 4280 default: 4281 break; 4282 } 4283 } 4284 /* Done */ 4285 else { 4286 err = -EINVAL; 4287 if (unlikely(req->tp_frame_nr)) 4288 goto out; 4289 } 4290 4291 4292 /* Detach socket from network */ 4293 spin_lock(&po->bind_lock); 4294 was_running = po->running; 4295 num = po->num; 4296 if (was_running) { 4297 po->num = 0; 4298 __unregister_prot_hook(sk, false); 4299 } 4300 spin_unlock(&po->bind_lock); 4301 4302 synchronize_net(); 4303 4304 err = -EBUSY; 4305 mutex_lock(&po->pg_vec_lock); 4306 if (closing || atomic_read(&po->mapped) == 0) { 4307 err = 0; 4308 spin_lock_bh(&rb_queue->lock); 4309 swap(rb->pg_vec, pg_vec); 4310 rb->frame_max = (req->tp_frame_nr - 1); 4311 rb->head = 0; 4312 rb->frame_size = req->tp_frame_size; 4313 spin_unlock_bh(&rb_queue->lock); 4314 4315 swap(rb->pg_vec_len, req->tp_block_nr); 4316 4317 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 4318 po->prot_hook.func = (po->rx_ring.pg_vec) ? 

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
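
/* Illustrative userspace sketch of the mmap()ed RX ring path
 * implemented above, not an authoritative reference: request a
 * TPACKET_V2 ring with PACKET_RX_RING, map it with a length that
 * matches the ring exactly (packet_mmap() rejects any other length or
 * a non-zero offset), and poll() until packet_poll() reports a frame.
 * The geometry is an assumed example and all error handling is
 * omitted; CAP_NET_RAW is required to open the socket.
 *
 *	#include <poll.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * After poll() returns, frames whose tp_status has TP_STATUS_USER set
 * belong to userspace; writing TP_STATUS_KERNEL back hands the slot to
 * the kernel again.
 */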