/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *	Johann Baudy		:	Added TX RING.
 *	Chetan Loke		:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

In summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
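/* A minimal user-space sketch of the SOCK_RAW contract described above:
 * received frames start at the link-layer header, and bind() takes a
 * struct sockaddr_ll. Error handling is omitted and the interface name
 * is an illustrative assumption.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// buf[0] is the first byte of the ll header
 */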
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);
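/* For reference, a user-space sketch of the ring request that eventually
 * reaches packet_set_ring(). The block geometry and timeout below are
 * illustrative assumptions, not requirements.
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 22,
 *		.tp_block_nr	   = 64,
 *		.tp_frame_size	   = 1 << 11,
 *		.tp_frame_nr	   = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,  // msec; 0 = let the kernel derive it
 *	};
 *	int version = TPACKET_V3;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */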
#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	return queue_index;
}
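/* A worked example of the fallback mapping in __packet_pick_tx_queue()
 * (illustrative numbers only):
 *
 *	u16 cpu = 6, nr_queues = 4;	// dev->real_num_tx_queues == 4
 *	u16 queue = cpu % nr_queues;	// -> TX queue 2
 *
 * Drivers that implement ndo_select_queue() may override this mapping,
 * which is why packet_pick_tx_queue() consults it first and caps the
 * result with netdev_cap_txqueue().
 */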
/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
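/* The user-space half of the status handshake above, as a sketch. The
 * kernel publishes a frame by setting TP_STATUS_USER after smp_wmb();
 * user space hands it back by storing TP_STATUS_KERNEL. The compiler
 * builtin below stands in for the matching read/write barriers, and a
 * TPACKET_V2 frame layout is assumed.
 *
 *	struct tpacket2_hdr *hdr = frame;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	__sync_synchronize();			// pairs with smp_wmb() above
 *	consume((char *)frame + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();			// finish reads before release
 *	hdr->tp_status = TP_STATUS_KERNEL;	// give the frame back
 */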
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
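/* A worked example of the index arithmetic in packet_lookup_frame(),
 * with illustrative numbers: 4 frames per block, 2048-byte frames.
 * Frame position 9 then resolves as:
 *
 *	pg_vec_pos   = 9 / 4;	// block 2
 *	frame_offset = 9 % 4;	// frame 1 within that block
 *	h.raw = pg_vec[2].buffer + 1 * 2048;
 */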
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is this slow, you don't really
		 * need to worry about perf anyway
		 */
		if (ecmd.base.speed < SPEED_1000 ||
		    ecmd.base.speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = ecmd.base.speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
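/* A worked example for prb_calc_retire_blk_tmo(): a 1 MiB block is
 * 8 Mbit (blk_size_in_bytes * 8 / 2^20 == 8). On a 1 Gbit/s link,
 * div == 1, so tmo == 8 * 1 + 1 == 9 msec -- matching the "~8 ms to
 * fill a block" estimate in the timer comment further down. On a
 * 10 Gbit/s link, div == 10 and integer division gives
 * tmo == 0 + 1 == 1 msec.
 */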
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * refreshes the timer; thawing/timer-refresh
				 * is a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
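/* The user-space release that un-freezes the queue, as a sketch (V3
 * layout assumed): after consuming every packet in a block, user space
 * returns the block by rewriting its status word, which is what
 * prb_curr_blk_in_use() checks for.
 *
 *	struct tpacket_block_desc *pbd = block;
 *
 *	// ... walk the packets in the block ...
 *	__sync_synchronize();
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 */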
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}
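/* A user-space sketch of consuming the chain that prb_fill_curr_block()
 * builds: within a block, tp_next_offset links the packet headers, and
 * prb_close_block() zeroes it for the last packet. V3 layout assumed.
 *
 *	struct tpacket_block_desc *pbd = block;
 *	struct tpacket3_hdr *ppd =
 *		(void *)((char *)pbd + pbd->hdr.bh1.offset_to_first_pkt);
 *	__u32 i;
 *
 *	for (i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *		consume((char *)ppd + ppd->tp_mac, ppd->tp_snaplen);
 *		ppd = (void *)((char *)ppd + ppd->tp_next_offset);
 *	}
 */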
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}
/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}
#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}
static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
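/* Example (user space): joining the group that packet_rcv_fanout()
 * demultiplexes. Every member passes the same 16-bit id in the low half
 * of the integer optval, with the mode and flags in the high half; the
 * id value here is an illustrative assumption.
 *
 *	int fanout_arg = 0x1234 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */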
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		/* fall through */
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}
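/* Example (user space): installing the classic-BPF demuxer consumed by
 * fanout_set_data_cbpf(). This one-instruction program steers every
 * packet to group member 0 and is purely illustrative.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0 },	// always return index 0
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &fprog, sizeof(fprog));
 */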
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
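/* Example (user space): the legacy SOCK_PACKET transmit path serviced
 * by packet_sendmsg_spkt(). The caller supplies a complete frame and
 * names the device in sockaddr_pkt; "eth0", frame and frame_len are
 * illustrative assumptions.
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */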
2102 */ 2103 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2104 2105 if (pskb_trim(skb, snaplen)) 2106 goto drop_n_acct; 2107 2108 skb_set_owner_r(skb, sk); 2109 skb->dev = NULL; 2110 skb_dst_drop(skb); 2111 2112 /* drop conntrack reference */ 2113 nf_reset(skb); 2114 2115 spin_lock(&sk->sk_receive_queue.lock); 2116 po->stats.stats1.tp_packets++; 2117 sock_skb_set_dropcount(sk, skb); 2118 __skb_queue_tail(&sk->sk_receive_queue, skb); 2119 spin_unlock(&sk->sk_receive_queue.lock); 2120 sk->sk_data_ready(sk); 2121 return 0; 2122 2123 drop_n_acct: 2124 is_drop_n_account = true; 2125 spin_lock(&sk->sk_receive_queue.lock); 2126 po->stats.stats1.tp_drops++; 2127 atomic_inc(&sk->sk_drops); 2128 spin_unlock(&sk->sk_receive_queue.lock); 2129 2130 drop_n_restore: 2131 if (skb_head != skb->data && skb_shared(skb)) { 2132 skb->data = skb_head; 2133 skb->len = skb_len; 2134 } 2135 drop: 2136 if (!is_drop_n_account) 2137 consume_skb(skb); 2138 else 2139 kfree_skb(skb); 2140 return 0; 2141 } 2142 2143 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2144 struct packet_type *pt, struct net_device *orig_dev) 2145 { 2146 struct sock *sk; 2147 struct packet_sock *po; 2148 struct sockaddr_ll *sll; 2149 union tpacket_uhdr h; 2150 u8 *skb_head = skb->data; 2151 int skb_len = skb->len; 2152 unsigned int snaplen, res; 2153 unsigned long status = TP_STATUS_USER; 2154 unsigned short macoff, netoff, hdrlen; 2155 struct sk_buff *copy_skb = NULL; 2156 struct timespec ts; 2157 __u32 ts_status; 2158 bool is_drop_n_account = false; 2159 bool do_vnet = false; 2160 2161 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2162 * We may add members to them until current aligned size without forcing 2163 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 2164 */ 2165 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2166 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2167 2168 if (skb->pkt_type == PACKET_LOOPBACK) 2169 goto drop; 2170 2171 sk = pt->af_packet_priv; 2172 po = pkt_sk(sk); 2173 2174 if (!net_eq(dev_net(dev), sock_net(sk))) 2175 goto drop; 2176 2177 if (dev->header_ops) { 2178 if (sk->sk_type != SOCK_DGRAM) 2179 skb_push(skb, skb->data - skb_mac_header(skb)); 2180 else if (skb->pkt_type == PACKET_OUTGOING) { 2181 /* Special case: outgoing packets have ll header at head */ 2182 skb_pull(skb, skb_network_offset(skb)); 2183 } 2184 } 2185 2186 snaplen = skb->len; 2187 2188 res = run_filter(skb, sk, snaplen); 2189 if (!res) 2190 goto drop_n_restore; 2191 2192 if (skb->ip_summed == CHECKSUM_PARTIAL) 2193 status |= TP_STATUS_CSUMNOTREADY; 2194 else if (skb->pkt_type != PACKET_OUTGOING && 2195 (skb->ip_summed == CHECKSUM_COMPLETE || 2196 skb_csum_unnecessary(skb))) 2197 status |= TP_STATUS_CSUM_VALID; 2198 2199 if (snaplen > res) 2200 snaplen = res; 2201 2202 if (sk->sk_type == SOCK_DGRAM) { 2203 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2204 po->tp_reserve; 2205 } else { 2206 unsigned int maclen = skb_network_offset(skb); 2207 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2208 (maclen < 16 ? 
16 : maclen)) + 2209 po->tp_reserve; 2210 if (po->has_vnet_hdr) { 2211 netoff += sizeof(struct virtio_net_hdr); 2212 do_vnet = true; 2213 } 2214 macoff = netoff - maclen; 2215 } 2216 if (po->tp_version <= TPACKET_V2) { 2217 if (macoff + snaplen > po->rx_ring.frame_size) { 2218 if (po->copy_thresh && 2219 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2220 if (skb_shared(skb)) { 2221 copy_skb = skb_clone(skb, GFP_ATOMIC); 2222 } else { 2223 copy_skb = skb_get(skb); 2224 skb_head = skb->data; 2225 } 2226 if (copy_skb) 2227 skb_set_owner_r(copy_skb, sk); 2228 } 2229 snaplen = po->rx_ring.frame_size - macoff; 2230 if ((int)snaplen < 0) { 2231 snaplen = 0; 2232 do_vnet = false; 2233 } 2234 } 2235 } else if (unlikely(macoff + snaplen > 2236 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2237 u32 nval; 2238 2239 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2240 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 2241 snaplen, nval, macoff); 2242 snaplen = nval; 2243 if (unlikely((int)snaplen < 0)) { 2244 snaplen = 0; 2245 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2246 do_vnet = false; 2247 } 2248 } 2249 spin_lock(&sk->sk_receive_queue.lock); 2250 h.raw = packet_current_rx_frame(po, skb, 2251 TP_STATUS_KERNEL, (macoff+snaplen)); 2252 if (!h.raw) 2253 goto drop_n_account; 2254 if (po->tp_version <= TPACKET_V2) { 2255 packet_increment_rx_head(po, &po->rx_ring); 2256 /* 2257 * LOSING will be reported till you read the stats, 2258 * because it's COR - Clear On Read. 2259 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2260 * at packet level. 2261 */ 2262 if (po->stats.stats1.tp_drops) 2263 status |= TP_STATUS_LOSING; 2264 } 2265 2266 if (do_vnet && 2267 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2268 sizeof(struct virtio_net_hdr), 2269 vio_le(), true, 0)) 2270 goto drop_n_account; 2271 2272 po->stats.stats1.tp_packets++; 2273 if (copy_skb) { 2274 status |= TP_STATUS_COPY; 2275 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2276 } 2277 spin_unlock(&sk->sk_receive_queue.lock); 2278 2279 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2280 2281 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 2282 getnstimeofday(&ts); 2283 2284 status |= ts_status; 2285 2286 switch (po->tp_version) { 2287 case TPACKET_V1: 2288 h.h1->tp_len = skb->len; 2289 h.h1->tp_snaplen = snaplen; 2290 h.h1->tp_mac = macoff; 2291 h.h1->tp_net = netoff; 2292 h.h1->tp_sec = ts.tv_sec; 2293 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2294 hdrlen = sizeof(*h.h1); 2295 break; 2296 case TPACKET_V2: 2297 h.h2->tp_len = skb->len; 2298 h.h2->tp_snaplen = snaplen; 2299 h.h2->tp_mac = macoff; 2300 h.h2->tp_net = netoff; 2301 h.h2->tp_sec = ts.tv_sec; 2302 h.h2->tp_nsec = ts.tv_nsec; 2303 if (skb_vlan_tag_present(skb)) { 2304 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2305 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2306 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2307 } else { 2308 h.h2->tp_vlan_tci = 0; 2309 h.h2->tp_vlan_tpid = 0; 2310 } 2311 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2312 hdrlen = sizeof(*h.h2); 2313 break; 2314 case TPACKET_V3: 2315 /* tp_nxt_offset,vlan are already populated above. 
2316 * So DONT clear those fields here 2317 */ 2318 h.h3->tp_status |= status; 2319 h.h3->tp_len = skb->len; 2320 h.h3->tp_snaplen = snaplen; 2321 h.h3->tp_mac = macoff; 2322 h.h3->tp_net = netoff; 2323 h.h3->tp_sec = ts.tv_sec; 2324 h.h3->tp_nsec = ts.tv_nsec; 2325 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2326 hdrlen = sizeof(*h.h3); 2327 break; 2328 default: 2329 BUG(); 2330 } 2331 2332 sll = h.raw + TPACKET_ALIGN(hdrlen); 2333 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2334 sll->sll_family = AF_PACKET; 2335 sll->sll_hatype = dev->type; 2336 sll->sll_protocol = skb->protocol; 2337 sll->sll_pkttype = skb->pkt_type; 2338 if (unlikely(po->origdev)) 2339 sll->sll_ifindex = orig_dev->ifindex; 2340 else 2341 sll->sll_ifindex = dev->ifindex; 2342 2343 smp_mb(); 2344 2345 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2346 if (po->tp_version <= TPACKET_V2) { 2347 u8 *start, *end; 2348 2349 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2350 macoff + snaplen); 2351 2352 for (start = h.raw; start < end; start += PAGE_SIZE) 2353 flush_dcache_page(pgv_to_page(start)); 2354 } 2355 smp_wmb(); 2356 #endif 2357 2358 if (po->tp_version <= TPACKET_V2) { 2359 __packet_set_status(po, h.raw, status); 2360 sk->sk_data_ready(sk); 2361 } else { 2362 prb_clear_blk_fill_status(&po->rx_ring); 2363 } 2364 2365 drop_n_restore: 2366 if (skb_head != skb->data && skb_shared(skb)) { 2367 skb->data = skb_head; 2368 skb->len = skb_len; 2369 } 2370 drop: 2371 if (!is_drop_n_account) 2372 consume_skb(skb); 2373 else 2374 kfree_skb(skb); 2375 return 0; 2376 2377 drop_n_account: 2378 is_drop_n_account = true; 2379 po->stats.stats1.tp_drops++; 2380 spin_unlock(&sk->sk_receive_queue.lock); 2381 2382 sk->sk_data_ready(sk); 2383 kfree_skb(copy_skb); 2384 goto drop_n_restore; 2385 } 2386 2387 static void tpacket_destruct_skb(struct sk_buff *skb) 2388 { 2389 struct packet_sock *po = pkt_sk(skb->sk); 2390 2391 if (likely(po->tx_ring.pg_vec)) { 2392 void *ph; 2393 __u32 ts; 2394 2395 ph = skb_shinfo(skb)->destructor_arg; 2396 packet_dec_pending(&po->tx_ring); 2397 2398 ts = __packet_set_timestamp(po, ph, skb); 2399 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2400 } 2401 2402 sock_wfree(skb); 2403 } 2404 2405 static void tpacket_set_protocol(const struct net_device *dev, 2406 struct sk_buff *skb) 2407 { 2408 if (dev->type == ARPHRD_ETHER) { 2409 skb_reset_mac_header(skb); 2410 skb->protocol = eth_hdr(skb)->h_proto; 2411 } 2412 } 2413 2414 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2415 { 2416 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2417 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2418 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2419 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2420 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2421 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2422 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2423 2424 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2425 return -EINVAL; 2426 2427 return 0; 2428 } 2429 2430 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2431 struct virtio_net_hdr *vnet_hdr) 2432 { 2433 if (*len < sizeof(*vnet_hdr)) 2434 return -EINVAL; 2435 *len -= sizeof(*vnet_hdr); 2436 2437 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2438 return -EFAULT; 2439 2440 return __packet_snd_vnet_parse(vnet_hdr, *len); 2441 } 2442 2443 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2444 void 
*frame, struct net_device *dev, void *data, int tp_len, 2445 __be16 proto, unsigned char *addr, int hlen, int copylen, 2446 const struct sockcm_cookie *sockc) 2447 { 2448 union tpacket_uhdr ph; 2449 int to_write, offset, len, nr_frags, len_max; 2450 struct socket *sock = po->sk.sk_socket; 2451 struct page *page; 2452 int err; 2453 2454 ph.raw = frame; 2455 2456 skb->protocol = proto; 2457 skb->dev = dev; 2458 skb->priority = po->sk.sk_priority; 2459 skb->mark = po->sk.sk_mark; 2460 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 2461 skb_shinfo(skb)->destructor_arg = ph.raw; 2462 2463 skb_reserve(skb, hlen); 2464 skb_reset_network_header(skb); 2465 2466 to_write = tp_len; 2467 2468 if (sock->type == SOCK_DGRAM) { 2469 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2470 NULL, tp_len); 2471 if (unlikely(err < 0)) 2472 return -EINVAL; 2473 } else if (copylen) { 2474 int hdrlen = min_t(int, copylen, tp_len); 2475 2476 skb_push(skb, dev->hard_header_len); 2477 skb_put(skb, copylen - dev->hard_header_len); 2478 err = skb_store_bits(skb, 0, data, hdrlen); 2479 if (unlikely(err)) 2480 return err; 2481 if (!dev_validate_header(dev, skb->data, hdrlen)) 2482 return -EINVAL; 2483 if (!skb->protocol) 2484 tpacket_set_protocol(dev, skb); 2485 2486 data += hdrlen; 2487 to_write -= hdrlen; 2488 } 2489 2490 offset = offset_in_page(data); 2491 len_max = PAGE_SIZE - offset; 2492 len = ((to_write > len_max) ? len_max : to_write); 2493 2494 skb->data_len = to_write; 2495 skb->len += to_write; 2496 skb->truesize += to_write; 2497 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2498 2499 while (likely(to_write)) { 2500 nr_frags = skb_shinfo(skb)->nr_frags; 2501 2502 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2503 pr_err("Packet exceeds the number of skb frags (%lu)\n", 2504 MAX_SKB_FRAGS); 2505 return -EFAULT; 2506 } 2507 2508 page = pgv_to_page(data); 2509 data += len; 2510 flush_dcache_page(page); 2511 get_page(page); 2512 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2513 to_write -= len; 2514 offset = 0; 2515 len_max = PAGE_SIZE; 2516 len = ((to_write > len_max) ?
len_max : to_write); 2517 } 2518 2519 skb_probe_transport_header(skb, 0); 2520 2521 return tp_len; 2522 } 2523 2524 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2525 int size_max, void **data) 2526 { 2527 union tpacket_uhdr ph; 2528 int tp_len, off; 2529 2530 ph.raw = frame; 2531 2532 switch (po->tp_version) { 2533 case TPACKET_V3: 2534 if (ph.h3->tp_next_offset != 0) { 2535 pr_warn_once("variable sized slot not supported"); 2536 return -EINVAL; 2537 } 2538 tp_len = ph.h3->tp_len; 2539 break; 2540 case TPACKET_V2: 2541 tp_len = ph.h2->tp_len; 2542 break; 2543 default: 2544 tp_len = ph.h1->tp_len; 2545 break; 2546 } 2547 if (unlikely(tp_len > size_max)) { 2548 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2549 return -EMSGSIZE; 2550 } 2551 2552 if (unlikely(po->tp_tx_has_off)) { 2553 int off_min, off_max; 2554 2555 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2556 off_max = po->tx_ring.frame_size - tp_len; 2557 if (po->sk.sk_type == SOCK_DGRAM) { 2558 switch (po->tp_version) { 2559 case TPACKET_V3: 2560 off = ph.h3->tp_net; 2561 break; 2562 case TPACKET_V2: 2563 off = ph.h2->tp_net; 2564 break; 2565 default: 2566 off = ph.h1->tp_net; 2567 break; 2568 } 2569 } else { 2570 switch (po->tp_version) { 2571 case TPACKET_V3: 2572 off = ph.h3->tp_mac; 2573 break; 2574 case TPACKET_V2: 2575 off = ph.h2->tp_mac; 2576 break; 2577 default: 2578 off = ph.h1->tp_mac; 2579 break; 2580 } 2581 } 2582 if (unlikely((off < off_min) || (off_max < off))) 2583 return -EINVAL; 2584 } else { 2585 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2586 } 2587 2588 *data = frame + off; 2589 return tp_len; 2590 } 2591 2592 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2593 { 2594 struct sk_buff *skb; 2595 struct net_device *dev; 2596 struct virtio_net_hdr *vnet_hdr = NULL; 2597 struct sockcm_cookie sockc; 2598 __be16 proto; 2599 int err, reserve = 0; 2600 void *ph; 2601 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2602 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2603 int tp_len, size_max; 2604 unsigned char *addr; 2605 void *data; 2606 int len_sum = 0; 2607 int status = TP_STATUS_AVAILABLE; 2608 int hlen, tlen, copylen = 0; 2609 2610 mutex_lock(&po->pg_vec_lock); 2611 2612 if (likely(saddr == NULL)) { 2613 dev = packet_cached_dev_get(po); 2614 proto = po->num; 2615 addr = NULL; 2616 } else { 2617 err = -EINVAL; 2618 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2619 goto out; 2620 if (msg->msg_namelen < (saddr->sll_halen 2621 + offsetof(struct sockaddr_ll, 2622 sll_addr))) 2623 goto out; 2624 proto = saddr->sll_protocol; 2625 addr = saddr->sll_addr; 2626 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2627 } 2628 2629 err = -ENXIO; 2630 if (unlikely(dev == NULL)) 2631 goto out; 2632 err = -ENETDOWN; 2633 if (unlikely(!(dev->flags & IFF_UP))) 2634 goto out_put; 2635 2636 sockc.tsflags = po->sk.sk_tsflags; 2637 if (msg->msg_controllen) { 2638 err = sock_cmsg_send(&po->sk, msg, &sockc); 2639 if (unlikely(err)) 2640 goto out_put; 2641 } 2642 2643 if (po->sk.sk_socket->type == SOCK_RAW) 2644 reserve = dev->hard_header_len; 2645 size_max = po->tx_ring.frame_size 2646 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2647 2648 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2649 size_max = dev->mtu + reserve + VLAN_HLEN; 2650 2651 do { 2652 ph = packet_current_frame(po, &po->tx_ring, 2653 TP_STATUS_SEND_REQUEST); 2654 if (unlikely(ph == NULL)) { 2655 if (need_wait && need_resched()) 2656 
schedule(); 2657 continue; 2658 } 2659 2660 skb = NULL; 2661 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2662 if (tp_len < 0) 2663 goto tpacket_error; 2664 2665 status = TP_STATUS_SEND_REQUEST; 2666 hlen = LL_RESERVED_SPACE(dev); 2667 tlen = dev->needed_tailroom; 2668 if (po->has_vnet_hdr) { 2669 vnet_hdr = data; 2670 data += sizeof(*vnet_hdr); 2671 tp_len -= sizeof(*vnet_hdr); 2672 if (tp_len < 0 || 2673 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2674 tp_len = -EINVAL; 2675 goto tpacket_error; 2676 } 2677 copylen = __virtio16_to_cpu(vio_le(), 2678 vnet_hdr->hdr_len); 2679 } 2680 copylen = max_t(int, copylen, dev->hard_header_len); 2681 skb = sock_alloc_send_skb(&po->sk, 2682 hlen + tlen + sizeof(struct sockaddr_ll) + 2683 (copylen - dev->hard_header_len), 2684 !need_wait, &err); 2685 2686 if (unlikely(skb == NULL)) { 2687 /* we assume the socket was initially writeable ... */ 2688 if (likely(len_sum > 0)) 2689 err = len_sum; 2690 goto out_status; 2691 } 2692 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2693 addr, hlen, copylen, &sockc); 2694 if (likely(tp_len >= 0) && 2695 tp_len > dev->mtu + reserve && 2696 !po->has_vnet_hdr && 2697 !packet_extra_vlan_len_allowed(dev, skb)) 2698 tp_len = -EMSGSIZE; 2699 2700 if (unlikely(tp_len < 0)) { 2701 tpacket_error: 2702 if (po->tp_loss) { 2703 __packet_set_status(po, ph, 2704 TP_STATUS_AVAILABLE); 2705 packet_increment_head(&po->tx_ring); 2706 kfree_skb(skb); 2707 continue; 2708 } else { 2709 status = TP_STATUS_WRONG_FORMAT; 2710 err = tp_len; 2711 goto out_status; 2712 } 2713 } 2714 2715 if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, 2716 vio_le())) { 2717 tp_len = -EINVAL; 2718 goto tpacket_error; 2719 } 2720 2721 skb->destructor = tpacket_destruct_skb; 2722 __packet_set_status(po, ph, TP_STATUS_SENDING); 2723 packet_inc_pending(&po->tx_ring); 2724 2725 status = TP_STATUS_SEND_REQUEST; 2726 err = po->xmit(skb); 2727 if (unlikely(err > 0)) { 2728 err = net_xmit_errno(err); 2729 if (err && __packet_get_status(po, ph) == 2730 TP_STATUS_AVAILABLE) { 2731 /* skb was destructed already */ 2732 skb = NULL; 2733 goto out_status; 2734 } 2735 /* 2736 * skb was dropped but not destructed yet; 2737 * let's treat it like congestion or err < 0 2738 */ 2739 err = 0; 2740 } 2741 packet_increment_head(&po->tx_ring); 2742 len_sum += tp_len; 2743 } while (likely((ph != NULL) || 2744 /* Note: packet_read_pending() might be slow if we have 2745 * to call it as it's per_cpu variable, but in fast-path 2746 * we already short-circuit the loop with the first 2747 * condition, and luckily don't have to go that path 2748 * anyway. 2749 */ 2750 (need_wait && packet_read_pending(&po->tx_ring)))); 2751 2752 err = len_sum; 2753 goto out_put; 2754 2755 out_status: 2756 __packet_set_status(po, ph, status); 2757 kfree_skb(skb); 2758 out_put: 2759 dev_put(dev); 2760 out: 2761 mutex_unlock(&po->pg_vec_lock); 2762 return err; 2763 } 2764 2765 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2766 size_t reserve, size_t len, 2767 size_t linear, int noblock, 2768 int *err) 2769 { 2770 struct sk_buff *skb; 2771 2772 /* Under a page? Don't bother with paged skb. 
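 * For short packets it is cheaper to copy everything into the linear
 * area than to set up page fragments, so 'linear' is grown to cover
 * the whole payload in that case.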
*/ 2773 if (prepad + len < PAGE_SIZE || !linear) 2774 linear = len; 2775 2776 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2777 err, 0); 2778 if (!skb) 2779 return NULL; 2780 2781 skb_reserve(skb, reserve); 2782 skb_put(skb, linear); 2783 skb->data_len = len - linear; 2784 skb->len += len - linear; 2785 2786 return skb; 2787 } 2788 2789 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2790 { 2791 struct sock *sk = sock->sk; 2792 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2793 struct sk_buff *skb; 2794 struct net_device *dev; 2795 __be16 proto; 2796 unsigned char *addr; 2797 int err, reserve = 0; 2798 struct sockcm_cookie sockc; 2799 struct virtio_net_hdr vnet_hdr = { 0 }; 2800 int offset = 0; 2801 struct packet_sock *po = pkt_sk(sk); 2802 bool has_vnet_hdr = false; 2803 int hlen, tlen, linear; 2804 int extra_len = 0; 2805 2806 /* 2807 * Get and verify the address. 2808 */ 2809 2810 if (likely(saddr == NULL)) { 2811 dev = packet_cached_dev_get(po); 2812 proto = po->num; 2813 addr = NULL; 2814 } else { 2815 err = -EINVAL; 2816 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2817 goto out; 2818 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2819 goto out; 2820 proto = saddr->sll_protocol; 2821 addr = saddr->sll_addr; 2822 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2823 } 2824 2825 err = -ENXIO; 2826 if (unlikely(dev == NULL)) 2827 goto out_unlock; 2828 err = -ENETDOWN; 2829 if (unlikely(!(dev->flags & IFF_UP))) 2830 goto out_unlock; 2831 2832 sockc.tsflags = sk->sk_tsflags; 2833 sockc.mark = sk->sk_mark; 2834 if (msg->msg_controllen) { 2835 err = sock_cmsg_send(sk, msg, &sockc); 2836 if (unlikely(err)) 2837 goto out_unlock; 2838 } 2839 2840 if (sock->type == SOCK_RAW) 2841 reserve = dev->hard_header_len; 2842 if (po->has_vnet_hdr) { 2843 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2844 if (err) 2845 goto out_unlock; 2846 has_vnet_hdr = true; 2847 } 2848 2849 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2850 if (!netif_supports_nofcs(dev)) { 2851 err = -EPROTONOSUPPORT; 2852 goto out_unlock; 2853 } 2854 extra_len = 4; /* We're doing our own CRC */ 2855 } 2856 2857 err = -EMSGSIZE; 2858 if (!vnet_hdr.gso_type && 2859 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2860 goto out_unlock; 2861 2862 err = -ENOBUFS; 2863 hlen = LL_RESERVED_SPACE(dev); 2864 tlen = dev->needed_tailroom; 2865 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2866 linear = max(linear, min_t(int, len, dev->hard_header_len)); 2867 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2868 msg->msg_flags & MSG_DONTWAIT, &err); 2869 if (skb == NULL) 2870 goto out_unlock; 2871 2872 skb_reset_network_header(skb); 2873 2874 err = -EINVAL; 2875 if (sock->type == SOCK_DGRAM) { 2876 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2877 if (unlikely(offset < 0)) 2878 goto out_free; 2879 } else if (reserve) { 2880 skb_reserve(skb, -reserve); 2881 } 2882 2883 /* Returns -EFAULT on error */ 2884 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 2885 if (err) 2886 goto out_free; 2887 2888 if (sock->type == SOCK_RAW && 2889 !dev_validate_header(dev, skb->data, len)) { 2890 err = -EINVAL; 2891 goto out_free; 2892 } 2893 2894 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); 2895 2896 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 2897 !packet_extra_vlan_len_allowed(dev, skb)) { 2898 err = -EMSGSIZE; 2899 goto out_free; 
2900 } 2901 2902 skb->protocol = proto; 2903 skb->dev = dev; 2904 skb->priority = sk->sk_priority; 2905 skb->mark = sockc.mark; 2906 2907 if (has_vnet_hdr) { 2908 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2909 if (err) 2910 goto out_free; 2911 len += sizeof(vnet_hdr); 2912 } 2913 2914 skb_probe_transport_header(skb, reserve); 2915 2916 if (unlikely(extra_len == 4)) 2917 skb->no_fcs = 1; 2918 2919 err = po->xmit(skb); 2920 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2921 goto out_unlock; 2922 2923 dev_put(dev); 2924 2925 return len; 2926 2927 out_free: 2928 kfree_skb(skb); 2929 out_unlock: 2930 if (dev) 2931 dev_put(dev); 2932 out: 2933 return err; 2934 } 2935 2936 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 2937 { 2938 struct sock *sk = sock->sk; 2939 struct packet_sock *po = pkt_sk(sk); 2940 2941 if (po->tx_ring.pg_vec) 2942 return tpacket_snd(po, msg); 2943 else 2944 return packet_snd(sock, msg, len); 2945 } 2946 2947 /* 2948 * Close a PACKET socket. This is fairly simple. We immediately go 2949 * to 'closed' state and remove our protocol entry in the device list. 2950 */ 2951 2952 static int packet_release(struct socket *sock) 2953 { 2954 struct sock *sk = sock->sk; 2955 struct packet_sock *po; 2956 struct packet_fanout *f; 2957 struct net *net; 2958 union tpacket_req_u req_u; 2959 2960 if (!sk) 2961 return 0; 2962 2963 net = sock_net(sk); 2964 po = pkt_sk(sk); 2965 2966 mutex_lock(&net->packet.sklist_lock); 2967 sk_del_node_init_rcu(sk); 2968 mutex_unlock(&net->packet.sklist_lock); 2969 2970 preempt_disable(); 2971 sock_prot_inuse_add(net, sk->sk_prot, -1); 2972 preempt_enable(); 2973 2974 spin_lock(&po->bind_lock); 2975 unregister_prot_hook(sk, false); 2976 packet_cached_dev_reset(po); 2977 2978 if (po->prot_hook.dev) { 2979 dev_put(po->prot_hook.dev); 2980 po->prot_hook.dev = NULL; 2981 } 2982 spin_unlock(&po->bind_lock); 2983 2984 packet_flush_mclist(sk); 2985 2986 lock_sock(sk); 2987 if (po->rx_ring.pg_vec) { 2988 memset(&req_u, 0, sizeof(req_u)); 2989 packet_set_ring(sk, &req_u, 1, 0); 2990 } 2991 2992 if (po->tx_ring.pg_vec) { 2993 memset(&req_u, 0, sizeof(req_u)); 2994 packet_set_ring(sk, &req_u, 1, 1); 2995 } 2996 release_sock(sk); 2997 2998 f = fanout_release(sk); 2999 3000 synchronize_net(); 3001 3002 if (f) { 3003 kfree(po->rollover); 3004 fanout_release_data(f); 3005 kfree(f); 3006 } 3007 /* 3008 * Now the socket is dead. No more input will appear. 3009 */ 3010 sock_orphan(sk); 3011 sock->sk = NULL; 3012 3013 /* Purge queues */ 3014 3015 skb_queue_purge(&sk->sk_receive_queue); 3016 packet_free_pending(po); 3017 sk_refcnt_debug_release(sk); 3018 3019 sock_put(sk); 3020 return 0; 3021 } 3022 3023 /* 3024 * Attach a packet hook. 
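 *
 * Userspace reaches this path through bind(2) on a PF_PACKET socket.
 * An illustrative sketch (the interface name here is an assumption):
 *
 *	struct sockaddr_ll sll = {0};
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_protocol keeps the socket's current protocol number
 * (see packet_bind() below).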
3025 */ 3026 3027 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3028 __be16 proto) 3029 { 3030 struct packet_sock *po = pkt_sk(sk); 3031 struct net_device *dev_curr; 3032 __be16 proto_curr; 3033 bool need_rehook; 3034 struct net_device *dev = NULL; 3035 int ret = 0; 3036 bool unlisted = false; 3037 3038 lock_sock(sk); 3039 spin_lock(&po->bind_lock); 3040 rcu_read_lock(); 3041 3042 if (po->fanout) { 3043 ret = -EINVAL; 3044 goto out_unlock; 3045 } 3046 3047 if (name) { 3048 dev = dev_get_by_name_rcu(sock_net(sk), name); 3049 if (!dev) { 3050 ret = -ENODEV; 3051 goto out_unlock; 3052 } 3053 } else if (ifindex) { 3054 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3055 if (!dev) { 3056 ret = -ENODEV; 3057 goto out_unlock; 3058 } 3059 } 3060 3061 if (dev) 3062 dev_hold(dev); 3063 3064 proto_curr = po->prot_hook.type; 3065 dev_curr = po->prot_hook.dev; 3066 3067 need_rehook = proto_curr != proto || dev_curr != dev; 3068 3069 if (need_rehook) { 3070 if (po->running) { 3071 rcu_read_unlock(); 3072 /* prevents packet_notifier() from calling 3073 * register_prot_hook() 3074 */ 3075 po->num = 0; 3076 __unregister_prot_hook(sk, true); 3077 rcu_read_lock(); 3078 dev_curr = po->prot_hook.dev; 3079 if (dev) 3080 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3081 dev->ifindex); 3082 } 3083 3084 BUG_ON(po->running); 3085 po->num = proto; 3086 po->prot_hook.type = proto; 3087 3088 if (unlikely(unlisted)) { 3089 dev_put(dev); 3090 po->prot_hook.dev = NULL; 3091 po->ifindex = -1; 3092 packet_cached_dev_reset(po); 3093 } else { 3094 po->prot_hook.dev = dev; 3095 po->ifindex = dev ? dev->ifindex : 0; 3096 packet_cached_dev_assign(po, dev); 3097 } 3098 } 3099 if (dev_curr) 3100 dev_put(dev_curr); 3101 3102 if (proto == 0 || !need_rehook) 3103 goto out_unlock; 3104 3105 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3106 register_prot_hook(sk); 3107 } else { 3108 sk->sk_err = ENETDOWN; 3109 if (!sock_flag(sk, SOCK_DEAD)) 3110 sk->sk_error_report(sk); 3111 } 3112 3113 out_unlock: 3114 rcu_read_unlock(); 3115 spin_unlock(&po->bind_lock); 3116 release_sock(sk); 3117 return ret; 3118 } 3119 3120 /* 3121 * Bind a packet socket to a device 3122 */ 3123 3124 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3125 int addr_len) 3126 { 3127 struct sock *sk = sock->sk; 3128 char name[sizeof(uaddr->sa_data) + 1]; 3129 3130 /* 3131 * Check legality 3132 */ 3133 3134 if (addr_len != sizeof(struct sockaddr)) 3135 return -EINVAL; 3136 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3137 * zero-terminated. 3138 */ 3139 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); 3140 name[sizeof(uaddr->sa_data)] = 0; 3141 3142 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3143 } 3144 3145 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3146 { 3147 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3148 struct sock *sk = sock->sk; 3149 3150 /* 3151 * Check legality 3152 */ 3153 3154 if (addr_len < sizeof(struct sockaddr_ll)) 3155 return -EINVAL; 3156 if (sll->sll_family != AF_PACKET) 3157 return -EINVAL; 3158 3159 return packet_do_bind(sk, NULL, sll->sll_ifindex, 3160 sll->sll_protocol ? : pkt_sk(sk)->num); 3161 } 3162 3163 static struct proto packet_proto = { 3164 .name = "PACKET", 3165 .owner = THIS_MODULE, 3166 .obj_size = sizeof(struct packet_sock), 3167 }; 3168 3169 /* 3170 * Create a packet of type SOCK_PACKET. 
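 *
 * Illustrative userspace counterpart (CAP_NET_RAW is required, as
 * checked below; the protocol argument is in network byte order):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));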
3171 */ 3172 3173 static int packet_create(struct net *net, struct socket *sock, int protocol, 3174 int kern) 3175 { 3176 struct sock *sk; 3177 struct packet_sock *po; 3178 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3179 int err; 3180 3181 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3182 return -EPERM; 3183 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3184 sock->type != SOCK_PACKET) 3185 return -ESOCKTNOSUPPORT; 3186 3187 sock->state = SS_UNCONNECTED; 3188 3189 err = -ENOBUFS; 3190 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3191 if (sk == NULL) 3192 goto out; 3193 3194 sock->ops = &packet_ops; 3195 if (sock->type == SOCK_PACKET) 3196 sock->ops = &packet_ops_spkt; 3197 3198 sock_init_data(sock, sk); 3199 3200 po = pkt_sk(sk); 3201 sk->sk_family = PF_PACKET; 3202 po->num = proto; 3203 po->xmit = dev_queue_xmit; 3204 3205 err = packet_alloc_pending(po); 3206 if (err) 3207 goto out2; 3208 3209 packet_cached_dev_reset(po); 3210 3211 sk->sk_destruct = packet_sock_destruct; 3212 sk_refcnt_debug_inc(sk); 3213 3214 /* 3215 * Attach a protocol block 3216 */ 3217 3218 spin_lock_init(&po->bind_lock); 3219 mutex_init(&po->pg_vec_lock); 3220 po->rollover = NULL; 3221 po->prot_hook.func = packet_rcv; 3222 3223 if (sock->type == SOCK_PACKET) 3224 po->prot_hook.func = packet_rcv_spkt; 3225 3226 po->prot_hook.af_packet_priv = sk; 3227 3228 if (proto) { 3229 po->prot_hook.type = proto; 3230 __register_prot_hook(sk); 3231 } 3232 3233 mutex_lock(&net->packet.sklist_lock); 3234 sk_add_node_rcu(sk, &net->packet.sklist); 3235 mutex_unlock(&net->packet.sklist_lock); 3236 3237 preempt_disable(); 3238 sock_prot_inuse_add(net, &packet_proto, 1); 3239 preempt_enable(); 3240 3241 return 0; 3242 out2: 3243 sk_free(sk); 3244 out: 3245 return err; 3246 } 3247 3248 /* 3249 * Pull a packet from our receive queue and hand it to the user. 3250 * If necessary we block. 3251 */ 3252 3253 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3254 int flags) 3255 { 3256 struct sock *sk = sock->sk; 3257 struct sk_buff *skb; 3258 int copied, err; 3259 int vnet_hdr_len = 0; 3260 unsigned int origlen = 0; 3261 3262 err = -EINVAL; 3263 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3264 goto out; 3265 3266 #if 0 3267 /* What error should we return now? EUNATTACH? */ 3268 if (pkt_sk(sk)->ifindex < 0) 3269 return -ENODEV; 3270 #endif 3271 3272 if (flags & MSG_ERRQUEUE) { 3273 err = sock_recv_errqueue(sk, msg, len, 3274 SOL_PACKET, PACKET_TX_TIMESTAMP); 3275 goto out; 3276 } 3277 3278 /* 3279 * Call the generic datagram receiver. This handles all sorts 3280 * of horrible races and re-entrancy so we can forget about it 3281 * in the protocol layers. 3282 * 3283 * Now it will return ENETDOWN if the device has just gone down, 3284 * but then it will block. 3285 */ 3286 3287 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3288 3289 /* 3290 * An error occurred so return it. Because skb_recv_datagram() 3291 * handles the blocking, we don't have to see or worry about 3292 * blocking retries. 3293 */ 3294 3295 if (skb == NULL) 3296 goto out; 3297 3298 if (pkt_sk(sk)->pressure) 3299 packet_rcv_has_room(pkt_sk(sk), NULL); 3300 3301 if (pkt_sk(sk)->has_vnet_hdr) { 3302 err = packet_rcv_vnet(msg, skb, &len); 3303 if (err) 3304 goto out_free; 3305 vnet_hdr_len = sizeof(struct virtio_net_hdr); 3306 } 3307 3308 /* You lose any data beyond the buffer you gave.
If it worries 3309 * a user program they can ask the device for its MTU 3310 * anyway. 3311 */ 3312 copied = skb->len; 3313 if (copied > len) { 3314 copied = len; 3315 msg->msg_flags |= MSG_TRUNC; 3316 } 3317 3318 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3319 if (err) 3320 goto out_free; 3321 3322 if (sock->type != SOCK_PACKET) { 3323 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3324 3325 /* Original length was stored in sockaddr_ll fields */ 3326 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3327 sll->sll_family = AF_PACKET; 3328 sll->sll_protocol = skb->protocol; 3329 } 3330 3331 sock_recv_ts_and_drops(msg, sk, skb); 3332 3333 if (msg->msg_name) { 3334 /* If the address length field is there to be filled 3335 * in, we fill it in now. 3336 */ 3337 if (sock->type == SOCK_PACKET) { 3338 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3339 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3340 } else { 3341 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3342 3343 msg->msg_namelen = sll->sll_halen + 3344 offsetof(struct sockaddr_ll, sll_addr); 3345 } 3346 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 3347 msg->msg_namelen); 3348 } 3349 3350 if (pkt_sk(sk)->auxdata) { 3351 struct tpacket_auxdata aux; 3352 3353 aux.tp_status = TP_STATUS_USER; 3354 if (skb->ip_summed == CHECKSUM_PARTIAL) 3355 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3356 else if (skb->pkt_type != PACKET_OUTGOING && 3357 (skb->ip_summed == CHECKSUM_COMPLETE || 3358 skb_csum_unnecessary(skb))) 3359 aux.tp_status |= TP_STATUS_CSUM_VALID; 3360 3361 aux.tp_len = origlen; 3362 aux.tp_snaplen = skb->len; 3363 aux.tp_mac = 0; 3364 aux.tp_net = skb_network_offset(skb); 3365 if (skb_vlan_tag_present(skb)) { 3366 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3367 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3368 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3369 } else { 3370 aux.tp_vlan_tci = 0; 3371 aux.tp_vlan_tpid = 0; 3372 } 3373 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3374 } 3375 3376 /* 3377 * Free or return the buffer as appropriate. Again this 3378 * hides all the races and re-entrancy issues from us. 3379 */ 3380 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3381 3382 out_free: 3383 skb_free_datagram(sk, skb); 3384 out: 3385 return err; 3386 } 3387 3388 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3389 int peer) 3390 { 3391 struct net_device *dev; 3392 struct sock *sk = sock->sk; 3393 3394 if (peer) 3395 return -EOPNOTSUPP; 3396 3397 uaddr->sa_family = AF_PACKET; 3398 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3399 rcu_read_lock(); 3400 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3401 if (dev) 3402 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3403 rcu_read_unlock(); 3404 3405 return sizeof(*uaddr); 3406 } 3407 3408 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3409 int peer) 3410 { 3411 struct net_device *dev; 3412 struct sock *sk = sock->sk; 3413 struct packet_sock *po = pkt_sk(sk); 3414 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3415 3416 if (peer) 3417 return -EOPNOTSUPP; 3418 3419 sll->sll_family = AF_PACKET; 3420 sll->sll_ifindex = po->ifindex; 3421 sll->sll_protocol = po->num; 3422 sll->sll_pkttype = 0; 3423 rcu_read_lock(); 3424 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3425 if (dev) { 3426 sll->sll_hatype = dev->type; 3427 sll->sll_halen = dev->addr_len; 3428 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3429 } else { 3430 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3431 sll->sll_halen = 0; 3432 } 3433 rcu_read_unlock(); 3434 3435 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3436 } 3437 3438 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3439 int what) 3440 { 3441 switch (i->type) { 3442 case PACKET_MR_MULTICAST: 3443 if (i->alen != dev->addr_len) 3444 return -EINVAL; 3445 if (what > 0) 3446 return dev_mc_add(dev, i->addr); 3447 else 3448 return dev_mc_del(dev, i->addr); 3449 break; 3450 case PACKET_MR_PROMISC: 3451 return dev_set_promiscuity(dev, what); 3452 case PACKET_MR_ALLMULTI: 3453 return dev_set_allmulti(dev, what); 3454 case PACKET_MR_UNICAST: 3455 if (i->alen != dev->addr_len) 3456 return -EINVAL; 3457 if (what > 0) 3458 return dev_uc_add(dev, i->addr); 3459 else 3460 return dev_uc_del(dev, i->addr); 3461 break; 3462 default: 3463 break; 3464 } 3465 return 0; 3466 } 3467 3468 static void packet_dev_mclist_delete(struct net_device *dev, 3469 struct packet_mclist **mlp) 3470 { 3471 struct packet_mclist *ml; 3472 3473 while ((ml = *mlp) != NULL) { 3474 if (ml->ifindex == dev->ifindex) { 3475 packet_dev_mc(dev, ml, -1); 3476 *mlp = ml->next; 3477 kfree(ml); 3478 } else 3479 mlp = &ml->next; 3480 } 3481 } 3482 3483 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3484 { 3485 struct packet_sock *po = pkt_sk(sk); 3486 struct packet_mclist *ml, *i; 3487 struct net_device *dev; 3488 int err; 3489 3490 rtnl_lock(); 3491 3492 err = -ENODEV; 3493 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3494 if (!dev) 3495 goto done; 3496 3497 err = -EINVAL; 3498 if (mreq->mr_alen > dev->addr_len) 3499 goto done; 3500 3501 err = -ENOBUFS; 3502 i = kmalloc(sizeof(*i), GFP_KERNEL); 3503 if (i == NULL) 3504 goto done; 3505 3506 err = 0; 3507 for (ml = po->mclist; ml; ml = ml->next) { 3508 if (ml->ifindex == mreq->mr_ifindex && 3509 ml->type == mreq->mr_type && 3510 ml->alen == mreq->mr_alen && 3511 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3512 ml->count++; 3513 /* Free the new element ... 
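 * (the existing entry's ml->count was bumped above instead)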
*/ 3514 kfree(i); 3515 goto done; 3516 } 3517 } 3518 3519 i->type = mreq->mr_type; 3520 i->ifindex = mreq->mr_ifindex; 3521 i->alen = mreq->mr_alen; 3522 memcpy(i->addr, mreq->mr_address, i->alen); 3523 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3524 i->count = 1; 3525 i->next = po->mclist; 3526 po->mclist = i; 3527 err = packet_dev_mc(dev, i, 1); 3528 if (err) { 3529 po->mclist = i->next; 3530 kfree(i); 3531 } 3532 3533 done: 3534 rtnl_unlock(); 3535 return err; 3536 } 3537 3538 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3539 { 3540 struct packet_mclist *ml, **mlp; 3541 3542 rtnl_lock(); 3543 3544 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3545 if (ml->ifindex == mreq->mr_ifindex && 3546 ml->type == mreq->mr_type && 3547 ml->alen == mreq->mr_alen && 3548 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3549 if (--ml->count == 0) { 3550 struct net_device *dev; 3551 *mlp = ml->next; 3552 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3553 if (dev) 3554 packet_dev_mc(dev, ml, -1); 3555 kfree(ml); 3556 } 3557 break; 3558 } 3559 } 3560 rtnl_unlock(); 3561 return 0; 3562 } 3563 3564 static void packet_flush_mclist(struct sock *sk) 3565 { 3566 struct packet_sock *po = pkt_sk(sk); 3567 struct packet_mclist *ml; 3568 3569 if (!po->mclist) 3570 return; 3571 3572 rtnl_lock(); 3573 while ((ml = po->mclist) != NULL) { 3574 struct net_device *dev; 3575 3576 po->mclist = ml->next; 3577 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3578 if (dev != NULL) 3579 packet_dev_mc(dev, ml, -1); 3580 kfree(ml); 3581 } 3582 rtnl_unlock(); 3583 } 3584 3585 static int 3586 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 3587 { 3588 struct sock *sk = sock->sk; 3589 struct packet_sock *po = pkt_sk(sk); 3590 int ret; 3591 3592 if (level != SOL_PACKET) 3593 return -ENOPROTOOPT; 3594 3595 switch (optname) { 3596 case PACKET_ADD_MEMBERSHIP: 3597 case PACKET_DROP_MEMBERSHIP: 3598 { 3599 struct packet_mreq_max mreq; 3600 int len = optlen; 3601 memset(&mreq, 0, sizeof(mreq)); 3602 if (len < sizeof(struct packet_mreq)) 3603 return -EINVAL; 3604 if (len > sizeof(mreq)) 3605 len = sizeof(mreq); 3606 if (copy_from_user(&mreq, optval, len)) 3607 return -EFAULT; 3608 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3609 return -EINVAL; 3610 if (optname == PACKET_ADD_MEMBERSHIP) 3611 ret = packet_mc_add(sk, &mreq); 3612 else 3613 ret = packet_mc_drop(sk, &mreq); 3614 return ret; 3615 } 3616 3617 case PACKET_RX_RING: 3618 case PACKET_TX_RING: 3619 { 3620 union tpacket_req_u req_u; 3621 int len; 3622 3623 lock_sock(sk); 3624 switch (po->tp_version) { 3625 case TPACKET_V1: 3626 case TPACKET_V2: 3627 len = sizeof(req_u.req); 3628 break; 3629 case TPACKET_V3: 3630 default: 3631 len = sizeof(req_u.req3); 3632 break; 3633 } 3634 if (optlen < len) { 3635 ret = -EINVAL; 3636 } else { 3637 if (copy_from_user(&req_u.req, optval, len)) 3638 ret = -EFAULT; 3639 else 3640 ret = packet_set_ring(sk, &req_u, 0, 3641 optname == PACKET_TX_RING); 3642 } 3643 release_sock(sk); 3644 return ret; 3645 } 3646 case PACKET_COPY_THRESH: 3647 { 3648 int val; 3649 3650 if (optlen != sizeof(val)) 3651 return -EINVAL; 3652 if (copy_from_user(&val, optval, sizeof(val))) 3653 return -EFAULT; 3654 3655 pkt_sk(sk)->copy_thresh = val; 3656 return 0; 3657 } 3658 case PACKET_VERSION: 3659 { 3660 int val; 3661 3662 if (optlen != sizeof(val)) 3663 return -EINVAL; 3664 if (copy_from_user(&val, optval, 
sizeof(val))) 3665 return -EFAULT; 3666 switch (val) { 3667 case TPACKET_V1: 3668 case TPACKET_V2: 3669 case TPACKET_V3: 3670 break; 3671 default: 3672 return -EINVAL; 3673 } 3674 lock_sock(sk); 3675 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3676 ret = -EBUSY; 3677 } else { 3678 po->tp_version = val; 3679 ret = 0; 3680 } 3681 release_sock(sk); 3682 return ret; 3683 } 3684 case PACKET_RESERVE: 3685 { 3686 unsigned int val; 3687 3688 if (optlen != sizeof(val)) 3689 return -EINVAL; 3690 if (copy_from_user(&val, optval, sizeof(val))) 3691 return -EFAULT; 3692 if (val > INT_MAX) 3693 return -EINVAL; 3694 lock_sock(sk); 3695 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3696 ret = -EBUSY; 3697 } else { 3698 po->tp_reserve = val; 3699 ret = 0; 3700 } 3701 release_sock(sk); 3702 return ret; 3703 } 3704 case PACKET_LOSS: 3705 { 3706 unsigned int val; 3707 3708 if (optlen != sizeof(val)) 3709 return -EINVAL; 3710 if (copy_from_user(&val, optval, sizeof(val))) 3711 return -EFAULT; 3712 3713 lock_sock(sk); 3714 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3715 ret = -EBUSY; 3716 } else { 3717 po->tp_loss = !!val; 3718 ret = 0; 3719 } 3720 release_sock(sk); 3721 return ret; 3722 } 3723 case PACKET_AUXDATA: 3724 { 3725 int val; 3726 3727 if (optlen < sizeof(val)) 3728 return -EINVAL; 3729 if (copy_from_user(&val, optval, sizeof(val))) 3730 return -EFAULT; 3731 3732 lock_sock(sk); 3733 po->auxdata = !!val; 3734 release_sock(sk); 3735 return 0; 3736 } 3737 case PACKET_ORIGDEV: 3738 { 3739 int val; 3740 3741 if (optlen < sizeof(val)) 3742 return -EINVAL; 3743 if (copy_from_user(&val, optval, sizeof(val))) 3744 return -EFAULT; 3745 3746 lock_sock(sk); 3747 po->origdev = !!val; 3748 release_sock(sk); 3749 return 0; 3750 } 3751 case PACKET_VNET_HDR: 3752 { 3753 int val; 3754 3755 if (sock->type != SOCK_RAW) 3756 return -EINVAL; 3757 if (optlen < sizeof(val)) 3758 return -EINVAL; 3759 if (copy_from_user(&val, optval, sizeof(val))) 3760 return -EFAULT; 3761 3762 lock_sock(sk); 3763 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3764 ret = -EBUSY; 3765 } else { 3766 po->has_vnet_hdr = !!val; 3767 ret = 0; 3768 } 3769 release_sock(sk); 3770 return ret; 3771 } 3772 case PACKET_TIMESTAMP: 3773 { 3774 int val; 3775 3776 if (optlen != sizeof(val)) 3777 return -EINVAL; 3778 if (copy_from_user(&val, optval, sizeof(val))) 3779 return -EFAULT; 3780 3781 po->tp_tstamp = val; 3782 return 0; 3783 } 3784 case PACKET_FANOUT: 3785 { 3786 int val; 3787 3788 if (optlen != sizeof(val)) 3789 return -EINVAL; 3790 if (copy_from_user(&val, optval, sizeof(val))) 3791 return -EFAULT; 3792 3793 return fanout_add(sk, val & 0xffff, val >> 16); 3794 } 3795 case PACKET_FANOUT_DATA: 3796 { 3797 if (!po->fanout) 3798 return -EINVAL; 3799 3800 return fanout_set_data(po, optval, optlen); 3801 } 3802 case PACKET_TX_HAS_OFF: 3803 { 3804 unsigned int val; 3805 3806 if (optlen != sizeof(val)) 3807 return -EINVAL; 3808 if (copy_from_user(&val, optval, sizeof(val))) 3809 return -EFAULT; 3810 3811 lock_sock(sk); 3812 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3813 ret = -EBUSY; 3814 } else { 3815 po->tp_tx_has_off = !!val; 3816 ret = 0; 3817 } 3818 release_sock(sk); 3819 return 0; 3820 } 3821 case PACKET_QDISC_BYPASS: 3822 { 3823 int val; 3824 3825 if (optlen != sizeof(val)) 3826 return -EINVAL; 3827 if (copy_from_user(&val, optval, sizeof(val))) 3828 return -EFAULT; 3829 3830 po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; 3831 return 0; 3832 } 3833 default: 3834 return -ENOPROTOOPT; 3835 } 3836 } 3837 3838 static int packet_getsockopt(struct socket *sock, int level, int optname, 3839 char __user *optval, int __user *optlen) 3840 { 3841 int len; 3842 int val, lv = sizeof(val); 3843 struct sock *sk = sock->sk; 3844 struct packet_sock *po = pkt_sk(sk); 3845 void *data = &val; 3846 union tpacket_stats_u st; 3847 struct tpacket_rollover_stats rstats; 3848 3849 if (level != SOL_PACKET) 3850 return -ENOPROTOOPT; 3851 3852 if (get_user(len, optlen)) 3853 return -EFAULT; 3854 3855 if (len < 0) 3856 return -EINVAL; 3857 3858 switch (optname) { 3859 case PACKET_STATISTICS: 3860 spin_lock_bh(&sk->sk_receive_queue.lock); 3861 memcpy(&st, &po->stats, sizeof(st)); 3862 memset(&po->stats, 0, sizeof(po->stats)); 3863 spin_unlock_bh(&sk->sk_receive_queue.lock); 3864 3865 if (po->tp_version == TPACKET_V3) { 3866 lv = sizeof(struct tpacket_stats_v3); 3867 st.stats3.tp_packets += st.stats3.tp_drops; 3868 data = &st.stats3; 3869 } else { 3870 lv = sizeof(struct tpacket_stats); 3871 st.stats1.tp_packets += st.stats1.tp_drops; 3872 data = &st.stats1; 3873 } 3874 3875 break; 3876 case PACKET_AUXDATA: 3877 val = po->auxdata; 3878 break; 3879 case PACKET_ORIGDEV: 3880 val = po->origdev; 3881 break; 3882 case PACKET_VNET_HDR: 3883 val = po->has_vnet_hdr; 3884 break; 3885 case PACKET_VERSION: 3886 val = po->tp_version; 3887 break; 3888 case PACKET_HDRLEN: 3889 if (len > sizeof(int)) 3890 len = sizeof(int); 3891 if (len < sizeof(int)) 3892 return -EINVAL; 3893 if (copy_from_user(&val, optval, len)) 3894 return -EFAULT; 3895 switch (val) { 3896 case TPACKET_V1: 3897 val = sizeof(struct tpacket_hdr); 3898 break; 3899 case TPACKET_V2: 3900 val = sizeof(struct tpacket2_hdr); 3901 break; 3902 case TPACKET_V3: 3903 val = sizeof(struct tpacket3_hdr); 3904 break; 3905 default: 3906 return -EINVAL; 3907 } 3908 break; 3909 case PACKET_RESERVE: 3910 val = po->tp_reserve; 3911 break; 3912 case PACKET_LOSS: 3913 val = po->tp_loss; 3914 break; 3915 case PACKET_TIMESTAMP: 3916 val = po->tp_tstamp; 3917 break; 3918 case PACKET_FANOUT: 3919 val = (po->fanout ? 
3920 ((u32)po->fanout->id | 3921 ((u32)po->fanout->type << 16) | 3922 ((u32)po->fanout->flags << 24)) : 3923 0); 3924 break; 3925 case PACKET_ROLLOVER_STATS: 3926 if (!po->rollover) 3927 return -EINVAL; 3928 rstats.tp_all = atomic_long_read(&po->rollover->num); 3929 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 3930 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 3931 data = &rstats; 3932 lv = sizeof(rstats); 3933 break; 3934 case PACKET_TX_HAS_OFF: 3935 val = po->tp_tx_has_off; 3936 break; 3937 case PACKET_QDISC_BYPASS: 3938 val = packet_use_direct_xmit(po); 3939 break; 3940 default: 3941 return -ENOPROTOOPT; 3942 } 3943 3944 if (len > lv) 3945 len = lv; 3946 if (put_user(len, optlen)) 3947 return -EFAULT; 3948 if (copy_to_user(optval, data, len)) 3949 return -EFAULT; 3950 return 0; 3951 } 3952 3953 3954 #ifdef CONFIG_COMPAT 3955 static int compat_packet_setsockopt(struct socket *sock, int level, int optname, 3956 char __user *optval, unsigned int optlen) 3957 { 3958 struct packet_sock *po = pkt_sk(sock->sk); 3959 3960 if (level != SOL_PACKET) 3961 return -ENOPROTOOPT; 3962 3963 if (optname == PACKET_FANOUT_DATA && 3964 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { 3965 optval = (char __user *)get_compat_bpf_fprog(optval); 3966 if (!optval) 3967 return -EFAULT; 3968 optlen = sizeof(struct sock_fprog); 3969 } 3970 3971 return packet_setsockopt(sock, level, optname, optval, optlen); 3972 } 3973 #endif 3974 3975 static int packet_notifier(struct notifier_block *this, 3976 unsigned long msg, void *ptr) 3977 { 3978 struct sock *sk; 3979 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3980 struct net *net = dev_net(dev); 3981 3982 rcu_read_lock(); 3983 sk_for_each_rcu(sk, &net->packet.sklist) { 3984 struct packet_sock *po = pkt_sk(sk); 3985 3986 switch (msg) { 3987 case NETDEV_UNREGISTER: 3988 if (po->mclist) 3989 packet_dev_mclist_delete(dev, &po->mclist); 3990 /* fallthrough */ 3991 3992 case NETDEV_DOWN: 3993 if (dev->ifindex == po->ifindex) { 3994 spin_lock(&po->bind_lock); 3995 if (po->running) { 3996 __unregister_prot_hook(sk, false); 3997 sk->sk_err = ENETDOWN; 3998 if (!sock_flag(sk, SOCK_DEAD)) 3999 sk->sk_error_report(sk); 4000 } 4001 if (msg == NETDEV_UNREGISTER) { 4002 packet_cached_dev_reset(po); 4003 po->ifindex = -1; 4004 if (po->prot_hook.dev) 4005 dev_put(po->prot_hook.dev); 4006 po->prot_hook.dev = NULL; 4007 } 4008 spin_unlock(&po->bind_lock); 4009 } 4010 break; 4011 case NETDEV_UP: 4012 if (dev->ifindex == po->ifindex) { 4013 spin_lock(&po->bind_lock); 4014 if (po->num) 4015 register_prot_hook(sk); 4016 spin_unlock(&po->bind_lock); 4017 } 4018 break; 4019 } 4020 } 4021 rcu_read_unlock(); 4022 return NOTIFY_DONE; 4023 } 4024 4025 4026 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4027 unsigned long arg) 4028 { 4029 struct sock *sk = sock->sk; 4030 4031 switch (cmd) { 4032 case SIOCOUTQ: 4033 { 4034 int amount = sk_wmem_alloc_get(sk); 4035 4036 return put_user(amount, (int __user *)arg); 4037 } 4038 case SIOCINQ: 4039 { 4040 struct sk_buff *skb; 4041 int amount = 0; 4042 4043 spin_lock_bh(&sk->sk_receive_queue.lock); 4044 skb = skb_peek(&sk->sk_receive_queue); 4045 if (skb) 4046 amount = skb->len; 4047 spin_unlock_bh(&sk->sk_receive_queue.lock); 4048 return put_user(amount, (int __user *)arg); 4049 } 4050 case SIOCGSTAMP: 4051 return sock_get_timestamp(sk, (struct timeval __user *)arg); 4052 case SIOCGSTAMPNS: 4053 return sock_get_timestampns(sk, (struct timespec __user *)arg); 4054 4055 #ifdef CONFIG_INET 
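	/* These are routing / ARP / interface-address ioctls rather than
	 * packet-specific ones; hand them through to the INET layer when
	 * it is built in.
	 */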
4056 case SIOCADDRT: 4057 case SIOCDELRT: 4058 case SIOCDARP: 4059 case SIOCGARP: 4060 case SIOCSARP: 4061 case SIOCGIFADDR: 4062 case SIOCSIFADDR: 4063 case SIOCGIFBRDADDR: 4064 case SIOCSIFBRDADDR: 4065 case SIOCGIFNETMASK: 4066 case SIOCSIFNETMASK: 4067 case SIOCGIFDSTADDR: 4068 case SIOCSIFDSTADDR: 4069 case SIOCSIFFLAGS: 4070 return inet_dgram_ops.ioctl(sock, cmd, arg); 4071 #endif 4072 4073 default: 4074 return -ENOIOCTLCMD; 4075 } 4076 return 0; 4077 } 4078 4079 static __poll_t packet_poll(struct file *file, struct socket *sock, 4080 poll_table *wait) 4081 { 4082 struct sock *sk = sock->sk; 4083 struct packet_sock *po = pkt_sk(sk); 4084 __poll_t mask = datagram_poll(file, sock, wait); 4085 4086 spin_lock_bh(&sk->sk_receive_queue.lock); 4087 if (po->rx_ring.pg_vec) { 4088 if (!packet_previous_rx_frame(po, &po->rx_ring, 4089 TP_STATUS_KERNEL)) 4090 mask |= EPOLLIN | EPOLLRDNORM; 4091 } 4092 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 4093 po->pressure = 0; 4094 spin_unlock_bh(&sk->sk_receive_queue.lock); 4095 spin_lock_bh(&sk->sk_write_queue.lock); 4096 if (po->tx_ring.pg_vec) { 4097 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 4098 mask |= EPOLLOUT | EPOLLWRNORM; 4099 } 4100 spin_unlock_bh(&sk->sk_write_queue.lock); 4101 return mask; 4102 } 4103 4104 4105 /* Dirty? Well, I still have not learned a better way to account 4106 * for user mmaps. 4107 */ 4108 4109 static void packet_mm_open(struct vm_area_struct *vma) 4110 { 4111 struct file *file = vma->vm_file; 4112 struct socket *sock = file->private_data; 4113 struct sock *sk = sock->sk; 4114 4115 if (sk) 4116 atomic_inc(&pkt_sk(sk)->mapped); 4117 } 4118 4119 static void packet_mm_close(struct vm_area_struct *vma) 4120 { 4121 struct file *file = vma->vm_file; 4122 struct socket *sock = file->private_data; 4123 struct sock *sk = sock->sk; 4124 4125 if (sk) 4126 atomic_dec(&pkt_sk(sk)->mapped); 4127 } 4128 4129 static const struct vm_operations_struct packet_mmap_ops = { 4130 .open = packet_mm_open, 4131 .close = packet_mm_close, 4132 }; 4133 4134 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 4135 unsigned int len) 4136 { 4137 int i; 4138 4139 for (i = 0; i < len; i++) { 4140 if (likely(pg_vec[i].buffer)) { 4141 if (is_vmalloc_addr(pg_vec[i].buffer)) 4142 vfree(pg_vec[i].buffer); 4143 else 4144 free_pages((unsigned long)pg_vec[i].buffer, 4145 order); 4146 pg_vec[i].buffer = NULL; 4147 } 4148 } 4149 kfree(pg_vec); 4150 } 4151 4152 static char *alloc_one_pg_vec_page(unsigned long order) 4153 { 4154 char *buffer; 4155 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 4156 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 4157 4158 buffer = (char *) __get_free_pages(gfp_flags, order); 4159 if (buffer) 4160 return buffer; 4161 4162 /* __get_free_pages failed, fall back to vmalloc */ 4163 buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); 4164 if (buffer) 4165 return buffer; 4166 4167 /* vmalloc failed, let's dig into swap here */ 4168 gfp_flags &= ~__GFP_NORETRY; 4169 buffer = (char *) __get_free_pages(gfp_flags, order); 4170 if (buffer) 4171 return buffer; 4172 4173 /* complete and utter failure */ 4174 return NULL; 4175 } 4176 4177 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 4178 { 4179 unsigned int block_nr = req->tp_block_nr; 4180 struct pgv *pg_vec; 4181 int i; 4182 4183 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); 4184 if (unlikely(!pg_vec)) 4185 goto out; 4186 4187 for (i = 0; i < block_nr; i++) { 4188 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4189 if (unlikely(!pg_vec[i].buffer)) 4190 goto out_free_pgvec; 4191 } 4192 4193 out: 4194 return pg_vec; 4195 4196 out_free_pgvec: 4197 free_pg_vec(pg_vec, order, block_nr); 4198 pg_vec = NULL; 4199 goto out; 4200 } 4201 4202 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4203 int closing, int tx_ring) 4204 { 4205 struct pgv *pg_vec = NULL; 4206 struct packet_sock *po = pkt_sk(sk); 4207 int was_running, order = 0; 4208 struct packet_ring_buffer *rb; 4209 struct sk_buff_head *rb_queue; 4210 __be16 num; 4211 int err = -EINVAL; 4212 /* Added to avoid minimal code churn */ 4213 struct tpacket_req *req = &req_u->req; 4214 4215 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4216 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4217 4218 err = -EBUSY; 4219 if (!closing) { 4220 if (atomic_read(&po->mapped)) 4221 goto out; 4222 if (packet_read_pending(rb)) 4223 goto out; 4224 } 4225 4226 if (req->tp_block_nr) { 4227 /* Sanity tests and some calculations */ 4228 err = -EBUSY; 4229 if (unlikely(rb->pg_vec)) 4230 goto out; 4231 4232 switch (po->tp_version) { 4233 case TPACKET_V1: 4234 po->tp_hdrlen = TPACKET_HDRLEN; 4235 break; 4236 case TPACKET_V2: 4237 po->tp_hdrlen = TPACKET2_HDRLEN; 4238 break; 4239 case TPACKET_V3: 4240 po->tp_hdrlen = TPACKET3_HDRLEN; 4241 break; 4242 } 4243 4244 err = -EINVAL; 4245 if (unlikely((int)req->tp_block_size <= 0)) 4246 goto out; 4247 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4248 goto out; 4249 if (po->tp_version >= TPACKET_V3 && 4250 req->tp_block_size <= 4251 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr)) 4252 goto out; 4253 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4254 po->tp_reserve)) 4255 goto out; 4256 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4257 goto out; 4258 4259 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4260 if (unlikely(rb->frames_per_block == 0)) 4261 goto out; 4262 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) 4263 goto out; 4264 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4265 req->tp_frame_nr)) 4266 goto out; 4267 4268 err = -ENOMEM; 4269 order = get_order(req->tp_block_size); 4270 pg_vec = alloc_pg_vec(req, order); 4271 if (unlikely(!pg_vec)) 4272 goto out; 4273 switch (po->tp_version) { 4274 case TPACKET_V3: 4275 /* Block transmit is not supported yet */ 4276 if (!tx_ring) { 4277 init_prb_bdqc(po, rb, pg_vec, req_u); 4278 } else { 4279 struct tpacket_req3 *req3 = &req_u->req3; 4280 4281 if (req3->tp_retire_blk_tov || 4282 req3->tp_sizeof_priv || 4283 req3->tp_feature_req_word) { 4284 err = -EINVAL; 4285 goto out; 4286 } 4287 } 4288 break; 4289 default: 4290 break; 4291 } 4292 } 4293 /* Done */ 4294 else { 4295 err = -EINVAL; 4296 if (unlikely(req->tp_frame_nr)) 4297 goto out; 4298 } 4299 4300 4301 /* Detach socket from network */ 4302 spin_lock(&po->bind_lock); 4303 was_running = po->running; 4304 num = po->num; 4305 if (was_running) { 4306 po->num = 0; 4307 __unregister_prot_hook(sk, false); 4308 } 4309 spin_unlock(&po->bind_lock); 4310 4311 synchronize_net(); 4312 4313 err = -EBUSY; 4314 mutex_lock(&po->pg_vec_lock); 4315 if (closing || atomic_read(&po->mapped) == 0) { 4316 err = 0; 4317 spin_lock_bh(&rb_queue->lock); 4318 swap(rb->pg_vec, pg_vec); 4319 rb->frame_max = (req->tp_frame_nr - 1); 4320 rb->head = 0; 4321 rb->frame_size = req->tp_frame_size; 4322 spin_unlock_bh(&rb_queue->lock); 4323 4324 swap(rb->pg_vec_order, order); 4325 swap(rb->pg_vec_len, req->tp_block_nr); 4326 4327 
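		/* After the swap()s above, the local 'pg_vec' and 'order'
		 * describe the old ring; it is freed at the bottom of this
		 * function once the protocol hook is re-registered.
		 */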

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on the tx ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}

/* Map all configured rings (rx and/or tx) into one contiguous user
 * mapping.  The caller must mmap() exactly the combined ring size at
 * offset 0; partial mappings are rejected.
 */
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
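
/* Illustrative userspace sketch, not part of this file's build (error
 * handling omitted, CAP_NET_RAW required): a minimal sequence that
 * exercises packet_setsockopt() and packet_mmap() above to map a
 * TPACKET_V2 receive ring.  The sizes are example values only; they
 * merely satisfy the checks in packet_set_ring(): a page-aligned block
 * size, a frame size that is a multiple of TPACKET_ALIGNMENT, and
 * frames_per_block * tp_block_nr == tp_frame_nr.  Needs <sys/socket.h>,
 * <sys/mman.h>, <linux/if_packet.h>, <linux/if_ether.h> and
 * <arpa/inet.h>.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int version = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// one page per block
 *		.tp_block_nr   = 2,
 *		.tp_frame_size = 2048,	// two frames per block
 *		.tp_frame_nr   = 4,	// 2 blocks * 2 frames
 *	};
 *	void *ring;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */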

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
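
/* When built as a module, the MODULE_ALIAS_NETPROTO(PF_PACKET) tag above
 * lets the first socket(AF_PACKET, ...) call autoload this module through
 * the generated "net-pf-17" alias.
 */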