/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It very likely points to the ll header.
                 PPP does this, which is wrong because it introduces
                 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw (the network header) to the correct position on
   output; the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
210 void (*func) (unsigned long)); 211 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); 212 static void prb_clear_rxhash(struct tpacket_kbdq_core *, 213 struct tpacket3_hdr *); 214 static void prb_fill_vlan_info(struct tpacket_kbdq_core *, 215 struct tpacket3_hdr *); 216 static void packet_flush_mclist(struct sock *sk); 217 218 struct packet_skb_cb { 219 unsigned int origlen; 220 union { 221 struct sockaddr_pkt pkt; 222 struct sockaddr_ll ll; 223 } sa; 224 }; 225 226 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 227 228 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 229 #define GET_PBLOCK_DESC(x, bid) \ 230 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 231 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 232 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 233 #define GET_NEXT_PRB_BLK_NUM(x) \ 234 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 235 ((x)->kactive_blk_num+1) : 0) 236 237 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 238 static void __fanout_link(struct sock *sk, struct packet_sock *po); 239 240 static int packet_direct_xmit(struct sk_buff *skb) 241 { 242 struct net_device *dev = skb->dev; 243 netdev_features_t features; 244 struct netdev_queue *txq; 245 int ret = NETDEV_TX_BUSY; 246 247 if (unlikely(!netif_running(dev) || 248 !netif_carrier_ok(dev))) 249 goto drop; 250 251 features = netif_skb_features(skb); 252 if (skb_needs_linearize(skb, features) && 253 __skb_linearize(skb)) 254 goto drop; 255 256 txq = skb_get_tx_queue(dev, skb); 257 258 local_bh_disable(); 259 260 HARD_TX_LOCK(dev, txq, smp_processor_id()); 261 if (!netif_xmit_frozen_or_drv_stopped(txq)) 262 ret = netdev_start_xmit(skb, dev, txq, false); 263 HARD_TX_UNLOCK(dev, txq); 264 265 local_bh_enable(); 266 267 if (!dev_xmit_complete(ret)) 268 kfree_skb(skb); 269 270 return ret; 271 drop: 272 atomic_long_inc(&dev->tx_dropped); 273 kfree_skb(skb); 274 return NET_XMIT_DROP; 275 } 276 277 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 278 { 279 struct net_device *dev; 280 281 rcu_read_lock(); 282 dev = rcu_dereference(po->cached_dev); 283 if (likely(dev)) 284 dev_hold(dev); 285 rcu_read_unlock(); 286 287 return dev; 288 } 289 290 static void packet_cached_dev_assign(struct packet_sock *po, 291 struct net_device *dev) 292 { 293 rcu_assign_pointer(po->cached_dev, dev); 294 } 295 296 static void packet_cached_dev_reset(struct packet_sock *po) 297 { 298 RCU_INIT_POINTER(po->cached_dev, NULL); 299 } 300 301 static bool packet_use_direct_xmit(const struct packet_sock *po) 302 { 303 return po->xmit == packet_direct_xmit; 304 } 305 306 static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 307 { 308 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 309 } 310 311 static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 312 { 313 const struct net_device_ops *ops = dev->netdev_ops; 314 u16 queue_index; 315 316 if (ops->ndo_select_queue) { 317 queue_index = ops->ndo_select_queue(dev, skb, NULL, 318 __packet_pick_tx_queue); 319 queue_index = netdev_cap_txqueue(dev, queue_index); 320 } else { 321 queue_index = __packet_pick_tx_queue(dev, skb); 322 } 323 324 skb_set_queue_mapping(skb, queue_index); 325 } 326 327 /* register_prot_hook must be invoked with the po->bind_lock held, 328 * or from a context in which asynchronous accesses to the packet 329 * socket is not possible (packet_create()). 
330 */ 331 static void register_prot_hook(struct sock *sk) 332 { 333 struct packet_sock *po = pkt_sk(sk); 334 335 if (!po->running) { 336 if (po->fanout) 337 __fanout_link(sk, po); 338 else 339 dev_add_pack(&po->prot_hook); 340 341 sock_hold(sk); 342 po->running = 1; 343 } 344 } 345 346 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock 347 * held. If the sync parameter is true, we will temporarily drop 348 * the po->bind_lock and do a synchronize_net to make sure no 349 * asynchronous packet processing paths still refer to the elements 350 * of po->prot_hook. If the sync parameter is false, it is the 351 * callers responsibility to take care of this. 352 */ 353 static void __unregister_prot_hook(struct sock *sk, bool sync) 354 { 355 struct packet_sock *po = pkt_sk(sk); 356 357 po->running = 0; 358 359 if (po->fanout) 360 __fanout_unlink(sk, po); 361 else 362 __dev_remove_pack(&po->prot_hook); 363 364 __sock_put(sk); 365 366 if (sync) { 367 spin_unlock(&po->bind_lock); 368 synchronize_net(); 369 spin_lock(&po->bind_lock); 370 } 371 } 372 373 static void unregister_prot_hook(struct sock *sk, bool sync) 374 { 375 struct packet_sock *po = pkt_sk(sk); 376 377 if (po->running) 378 __unregister_prot_hook(sk, sync); 379 } 380 381 static inline struct page * __pure pgv_to_page(void *addr) 382 { 383 if (is_vmalloc_addr(addr)) 384 return vmalloc_to_page(addr); 385 return virt_to_page(addr); 386 } 387 388 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 389 { 390 union tpacket_uhdr h; 391 392 h.raw = frame; 393 switch (po->tp_version) { 394 case TPACKET_V1: 395 h.h1->tp_status = status; 396 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 397 break; 398 case TPACKET_V2: 399 h.h2->tp_status = status; 400 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 401 break; 402 case TPACKET_V3: 403 default: 404 WARN(1, "TPACKET version not supported.\n"); 405 BUG(); 406 } 407 408 smp_wmb(); 409 } 410 411 static int __packet_get_status(struct packet_sock *po, void *frame) 412 { 413 union tpacket_uhdr h; 414 415 smp_rmb(); 416 417 h.raw = frame; 418 switch (po->tp_version) { 419 case TPACKET_V1: 420 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 421 return h.h1->tp_status; 422 case TPACKET_V2: 423 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 424 return h.h2->tp_status; 425 case TPACKET_V3: 426 default: 427 WARN(1, "TPACKET version not supported.\n"); 428 BUG(); 429 return 0; 430 } 431 } 432 433 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, 434 unsigned int flags) 435 { 436 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 437 438 if (shhwtstamps && 439 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 440 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) 441 return TP_STATUS_TS_RAW_HARDWARE; 442 443 if (ktime_to_timespec_cond(skb->tstamp, ts)) 444 return TP_STATUS_TS_SOFTWARE; 445 446 return 0; 447 } 448 449 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 450 struct sk_buff *skb) 451 { 452 union tpacket_uhdr h; 453 struct timespec ts; 454 __u32 ts_status; 455 456 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 457 return 0; 458 459 h.raw = frame; 460 switch (po->tp_version) { 461 case TPACKET_V1: 462 h.h1->tp_sec = ts.tv_sec; 463 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 464 break; 465 case TPACKET_V2: 466 h.h2->tp_sec = ts.tv_sec; 467 h.h2->tp_nsec = ts.tv_nsec; 468 break; 469 case TPACKET_V3: 470 default: 471 WARN(1, "TPACKET version not supported.\n"); 472 BUG(); 473 } 
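	/* TPACKET_V1 exposes the timestamp in microseconds (tp_sec/tp_usec),
	 * while TPACKET_V2 keeps full nanosecond resolution (tp_sec/tp_nsec).
	 * TPACKET_V3 frames are not timestamped here; tpacket_rcv() fills in
	 * h3->tp_sec/tp_nsec directly on the receive path.
	 */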
474 475 /* one flush is safe, as both fields always lie on the same cacheline */ 476 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 477 smp_wmb(); 478 479 return ts_status; 480 } 481 482 static void *packet_lookup_frame(struct packet_sock *po, 483 struct packet_ring_buffer *rb, 484 unsigned int position, 485 int status) 486 { 487 unsigned int pg_vec_pos, frame_offset; 488 union tpacket_uhdr h; 489 490 pg_vec_pos = position / rb->frames_per_block; 491 frame_offset = position % rb->frames_per_block; 492 493 h.raw = rb->pg_vec[pg_vec_pos].buffer + 494 (frame_offset * rb->frame_size); 495 496 if (status != __packet_get_status(po, h.raw)) 497 return NULL; 498 499 return h.raw; 500 } 501 502 static void *packet_current_frame(struct packet_sock *po, 503 struct packet_ring_buffer *rb, 504 int status) 505 { 506 return packet_lookup_frame(po, rb, rb->head, status); 507 } 508 509 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 510 { 511 del_timer_sync(&pkc->retire_blk_timer); 512 } 513 514 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 515 int tx_ring, 516 struct sk_buff_head *rb_queue) 517 { 518 struct tpacket_kbdq_core *pkc; 519 520 pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) : 521 GET_PBDQC_FROM_RB(&po->rx_ring); 522 523 spin_lock_bh(&rb_queue->lock); 524 pkc->delete_blk_timer = 1; 525 spin_unlock_bh(&rb_queue->lock); 526 527 prb_del_retire_blk_timer(pkc); 528 } 529 530 static void prb_init_blk_timer(struct packet_sock *po, 531 struct tpacket_kbdq_core *pkc, 532 void (*func) (unsigned long)) 533 { 534 init_timer(&pkc->retire_blk_timer); 535 pkc->retire_blk_timer.data = (long)po; 536 pkc->retire_blk_timer.function = func; 537 pkc->retire_blk_timer.expires = jiffies; 538 } 539 540 static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring) 541 { 542 struct tpacket_kbdq_core *pkc; 543 544 if (tx_ring) 545 BUG(); 546 547 pkc = tx_ring ? 
GET_PBDQC_FROM_RB(&po->tx_ring) : 548 GET_PBDQC_FROM_RB(&po->rx_ring); 549 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); 550 } 551 552 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 553 int blk_size_in_bytes) 554 { 555 struct net_device *dev; 556 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; 557 struct ethtool_cmd ecmd; 558 int err; 559 u32 speed; 560 561 rtnl_lock(); 562 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 563 if (unlikely(!dev)) { 564 rtnl_unlock(); 565 return DEFAULT_PRB_RETIRE_TOV; 566 } 567 err = __ethtool_get_settings(dev, &ecmd); 568 speed = ethtool_cmd_speed(&ecmd); 569 rtnl_unlock(); 570 if (!err) { 571 /* 572 * If the link speed is so slow you don't really 573 * need to worry about perf anyways 574 */ 575 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) { 576 return DEFAULT_PRB_RETIRE_TOV; 577 } else { 578 msec = 1; 579 div = speed / 1000; 580 } 581 } 582 583 mbits = (blk_size_in_bytes * 8) / (1024 * 1024); 584 585 if (div) 586 mbits /= div; 587 588 tmo = mbits * msec; 589 590 if (div) 591 return tmo+1; 592 return tmo; 593 } 594 595 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 596 union tpacket_req_u *req_u) 597 { 598 p1->feature_req_word = req_u->req3.tp_feature_req_word; 599 } 600 601 static void init_prb_bdqc(struct packet_sock *po, 602 struct packet_ring_buffer *rb, 603 struct pgv *pg_vec, 604 union tpacket_req_u *req_u, int tx_ring) 605 { 606 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 607 struct tpacket_block_desc *pbd; 608 609 memset(p1, 0x0, sizeof(*p1)); 610 611 p1->knxt_seq_num = 1; 612 p1->pkbdq = pg_vec; 613 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 614 p1->pkblk_start = pg_vec[0].buffer; 615 p1->kblk_size = req_u->req3.tp_block_size; 616 p1->knum_blocks = req_u->req3.tp_block_nr; 617 p1->hdrlen = po->tp_hdrlen; 618 p1->version = po->tp_version; 619 p1->last_kactive_blk_num = 0; 620 po->stats.stats3.tp_freeze_q_cnt = 0; 621 if (req_u->req3.tp_retire_blk_tov) 622 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; 623 else 624 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, 625 req_u->req3.tp_block_size); 626 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); 627 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 628 629 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 630 prb_init_ft_ops(p1, req_u); 631 prb_setup_retire_blk_timer(po, tx_ring); 632 prb_open_block(p1, pbd); 633 } 634 635 /* Do NOT update the last_blk_num first. 636 * Assumes sk_buff_head lock is held. 637 */ 638 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) 639 { 640 mod_timer(&pkc->retire_blk_timer, 641 jiffies + pkc->tov_in_jiffies); 642 pkc->last_kactive_blk_num = pkc->kactive_blk_num; 643 } 644 645 /* 646 * Timer logic: 647 * 1) We refresh the timer only when we open a block. 648 * By doing this we don't waste cycles refreshing the timer 649 * on packet-by-packet basis. 650 * 651 * With a 1MB block-size, on a 1Gbps line, it will take 652 * i) ~8 ms to fill a block + ii) memcpy etc. 653 * In this cut we are not accounting for the memcpy time. 654 * 655 * So, if the user sets the 'tmo' to 10ms then the timer 656 * will never fire while the block is still getting filled 657 * (which is what we want). However, the user could choose 658 * to close a block early and that's fine. 659 * 660 * But when the timer does fire, we check whether or not to refresh it. 
661 * Since the tmo granularity is in msecs, it is not too expensive 662 * to refresh the timer, lets say every '8' msecs. 663 * Either the user can set the 'tmo' or we can derive it based on 664 * a) line-speed and b) block-size. 665 * prb_calc_retire_blk_tmo() calculates the tmo. 666 * 667 */ 668 static void prb_retire_rx_blk_timer_expired(unsigned long data) 669 { 670 struct packet_sock *po = (struct packet_sock *)data; 671 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 672 unsigned int frozen; 673 struct tpacket_block_desc *pbd; 674 675 spin_lock(&po->sk.sk_receive_queue.lock); 676 677 frozen = prb_queue_frozen(pkc); 678 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 679 680 if (unlikely(pkc->delete_blk_timer)) 681 goto out; 682 683 /* We only need to plug the race when the block is partially filled. 684 * tpacket_rcv: 685 * lock(); increment BLOCK_NUM_PKTS; unlock() 686 * copy_bits() is in progress ... 687 * timer fires on other cpu: 688 * we can't retire the current block because copy_bits 689 * is in progress. 690 * 691 */ 692 if (BLOCK_NUM_PKTS(pbd)) { 693 while (atomic_read(&pkc->blk_fill_in_prog)) { 694 /* Waiting for skb_copy_bits to finish... */ 695 cpu_relax(); 696 } 697 } 698 699 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 700 if (!frozen) { 701 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 702 if (!prb_dispatch_next_block(pkc, po)) 703 goto refresh_timer; 704 else 705 goto out; 706 } else { 707 /* Case 1. Queue was frozen because user-space was 708 * lagging behind. 709 */ 710 if (prb_curr_blk_in_use(pkc, pbd)) { 711 /* 712 * Ok, user-space is still behind. 713 * So just refresh the timer. 714 */ 715 goto refresh_timer; 716 } else { 717 /* Case 2. queue was frozen,user-space caught up, 718 * now the link went idle && the timer fired. 719 * We don't have a block to close.So we open this 720 * block and restart the timer. 721 * opening a block thaws the queue,restarts timer 722 * Thawing/timer-refresh is a side effect. 723 */ 724 prb_open_block(pkc, pbd); 725 goto out; 726 } 727 } 728 } 729 730 refresh_timer: 731 _prb_refresh_rx_retire_blk_timer(pkc); 732 733 out: 734 spin_unlock(&po->sk.sk_receive_queue.lock); 735 } 736 737 static void prb_flush_block(struct tpacket_kbdq_core *pkc1, 738 struct tpacket_block_desc *pbd1, __u32 status) 739 { 740 /* Flush everything minus the block header */ 741 742 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 743 u8 *start, *end; 744 745 start = (u8 *)pbd1; 746 747 /* Skip the block header(we know header WILL fit in 4K) */ 748 start += PAGE_SIZE; 749 750 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); 751 for (; start < end; start += PAGE_SIZE) 752 flush_dcache_page(pgv_to_page(start)); 753 754 smp_wmb(); 755 #endif 756 757 /* Now update the block status. */ 758 759 BLOCK_STATUS(pbd1) = status; 760 761 /* Flush the block header */ 762 763 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 764 start = (u8 *)pbd1; 765 flush_dcache_page(pgv_to_page(start)); 766 767 smp_wmb(); 768 #endif 769 } 770 771 /* 772 * Side effect: 773 * 774 * 1) flush the block 775 * 2) Increment active_blk_num 776 * 777 * Note:We DONT refresh the timer on purpose. 778 * Because almost always the next block will be opened. 
779 */ 780 static void prb_close_block(struct tpacket_kbdq_core *pkc1, 781 struct tpacket_block_desc *pbd1, 782 struct packet_sock *po, unsigned int stat) 783 { 784 __u32 status = TP_STATUS_USER | stat; 785 786 struct tpacket3_hdr *last_pkt; 787 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 788 struct sock *sk = &po->sk; 789 790 if (po->stats.stats3.tp_drops) 791 status |= TP_STATUS_LOSING; 792 793 last_pkt = (struct tpacket3_hdr *)pkc1->prev; 794 last_pkt->tp_next_offset = 0; 795 796 /* Get the ts of the last pkt */ 797 if (BLOCK_NUM_PKTS(pbd1)) { 798 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 799 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 800 } else { 801 /* Ok, we tmo'd - so get the current time */ 802 struct timespec ts; 803 getnstimeofday(&ts); 804 h1->ts_last_pkt.ts_sec = ts.tv_sec; 805 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; 806 } 807 808 smp_wmb(); 809 810 /* Flush the block */ 811 prb_flush_block(pkc1, pbd1, status); 812 813 sk->sk_data_ready(sk); 814 815 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 816 } 817 818 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) 819 { 820 pkc->reset_pending_on_curr_blk = 0; 821 } 822 823 /* 824 * Side effect of opening a block: 825 * 826 * 1) prb_queue is thawed. 827 * 2) retire_blk_timer is refreshed. 828 * 829 */ 830 static void prb_open_block(struct tpacket_kbdq_core *pkc1, 831 struct tpacket_block_desc *pbd1) 832 { 833 struct timespec ts; 834 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 835 836 smp_rmb(); 837 838 /* We could have just memset this but we will lose the 839 * flexibility of making the priv area sticky 840 */ 841 842 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; 843 BLOCK_NUM_PKTS(pbd1) = 0; 844 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 845 846 getnstimeofday(&ts); 847 848 h1->ts_first_pkt.ts_sec = ts.tv_sec; 849 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 850 851 pkc1->pkblk_start = (char *)pbd1; 852 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 853 854 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 855 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 856 857 pbd1->version = pkc1->version; 858 pkc1->prev = pkc1->nxt_offset; 859 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; 860 861 prb_thaw_queue(pkc1); 862 _prb_refresh_rx_retire_blk_timer(pkc1); 863 864 smp_wmb(); 865 } 866 867 /* 868 * Queue freeze logic: 869 * 1) Assume tp_block_nr = 8 blocks. 870 * 2) At time 't0', user opens Rx ring. 871 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 872 * 4) user-space is either sleeping or processing block '0'. 873 * 5) tpacket_rcv is currently filling block '7', since there is no space left, 874 * it will close block-7,loop around and try to fill block '0'. 875 * call-flow: 876 * __packet_lookup_frame_in_block 877 * prb_retire_current_block() 878 * prb_dispatch_next_block() 879 * |->(BLOCK_STATUS == USER) evaluates to true 880 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 881 * 6) Now there are two cases: 882 * 6.1) Link goes idle right after the queue is frozen. 883 * But remember, the last open_block() refreshed the timer. 884 * When this timer expires,it will refresh itself so that we can 885 * re-open block-0 in near future. 886 * 6.2) Link is busy and keeps on receiving packets. This is a simple 887 * case and __packet_lookup_frame_in_block will check if block-0 888 * is free and can now be re-used. 
889 */ 890 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 891 struct packet_sock *po) 892 { 893 pkc->reset_pending_on_curr_blk = 1; 894 po->stats.stats3.tp_freeze_q_cnt++; 895 } 896 897 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 898 899 /* 900 * If the next block is free then we will dispatch it 901 * and return a good offset. 902 * Else, we will freeze the queue. 903 * So, caller must check the return value. 904 */ 905 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 906 struct packet_sock *po) 907 { 908 struct tpacket_block_desc *pbd; 909 910 smp_rmb(); 911 912 /* 1. Get current block num */ 913 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 914 915 /* 2. If this block is currently in_use then freeze the queue */ 916 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 917 prb_freeze_queue(pkc, po); 918 return NULL; 919 } 920 921 /* 922 * 3. 923 * open this block and return the offset where the first packet 924 * needs to get stored. 925 */ 926 prb_open_block(pkc, pbd); 927 return (void *)pkc->nxt_offset; 928 } 929 930 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 931 struct packet_sock *po, unsigned int status) 932 { 933 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 934 935 /* retire/close the current block */ 936 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 937 /* 938 * Plug the case where copy_bits() is in progress on 939 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 940 * have space to copy the pkt in the current block and 941 * called prb_retire_current_block() 942 * 943 * We don't need to worry about the TMO case because 944 * the timer-handler already handled this case. 945 */ 946 if (!(status & TP_STATUS_BLK_TMO)) { 947 while (atomic_read(&pkc->blk_fill_in_prog)) { 948 /* Waiting for skb_copy_bits to finish... 
*/ 949 cpu_relax(); 950 } 951 } 952 prb_close_block(pkc, pbd, po, status); 953 return; 954 } 955 } 956 957 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc, 958 struct tpacket_block_desc *pbd) 959 { 960 return TP_STATUS_USER & BLOCK_STATUS(pbd); 961 } 962 963 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 964 { 965 return pkc->reset_pending_on_curr_blk; 966 } 967 968 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 969 { 970 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 971 atomic_dec(&pkc->blk_fill_in_prog); 972 } 973 974 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 975 struct tpacket3_hdr *ppd) 976 { 977 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 978 } 979 980 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 981 struct tpacket3_hdr *ppd) 982 { 983 ppd->hv1.tp_rxhash = 0; 984 } 985 986 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 987 struct tpacket3_hdr *ppd) 988 { 989 if (vlan_tx_tag_present(pkc->skb)) { 990 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb); 991 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 992 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 993 } else { 994 ppd->hv1.tp_vlan_tci = 0; 995 ppd->hv1.tp_vlan_tpid = 0; 996 ppd->tp_status = TP_STATUS_AVAILABLE; 997 } 998 } 999 1000 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 1001 struct tpacket3_hdr *ppd) 1002 { 1003 ppd->hv1.tp_padding = 0; 1004 prb_fill_vlan_info(pkc, ppd); 1005 1006 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 1007 prb_fill_rxhash(pkc, ppd); 1008 else 1009 prb_clear_rxhash(pkc, ppd); 1010 } 1011 1012 static void prb_fill_curr_block(char *curr, 1013 struct tpacket_kbdq_core *pkc, 1014 struct tpacket_block_desc *pbd, 1015 unsigned int len) 1016 { 1017 struct tpacket3_hdr *ppd; 1018 1019 ppd = (struct tpacket3_hdr *)curr; 1020 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 1021 pkc->prev = curr; 1022 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1023 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1024 BLOCK_NUM_PKTS(pbd) += 1; 1025 atomic_inc(&pkc->blk_fill_in_prog); 1026 prb_run_all_ft_ops(pkc, ppd); 1027 } 1028 1029 /* Assumes caller has the sk->rx_queue.lock */ 1030 static void *__packet_lookup_frame_in_block(struct packet_sock *po, 1031 struct sk_buff *skb, 1032 int status, 1033 unsigned int len 1034 ) 1035 { 1036 struct tpacket_kbdq_core *pkc; 1037 struct tpacket_block_desc *pbd; 1038 char *curr, *end; 1039 1040 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1041 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1042 1043 /* Queue is frozen when user space is lagging behind */ 1044 if (prb_queue_frozen(pkc)) { 1045 /* 1046 * Check if that last block which caused the queue to freeze, 1047 * is still in_use by user-space. 1048 */ 1049 if (prb_curr_blk_in_use(pkc, pbd)) { 1050 /* Can't record this packet */ 1051 return NULL; 1052 } else { 1053 /* 1054 * Ok, the block was released by user-space. 1055 * Now let's open that block. 1056 * opening a block also thaws the queue. 1057 * Thawing is a side effect. 
1058 */ 1059 prb_open_block(pkc, pbd); 1060 } 1061 } 1062 1063 smp_mb(); 1064 curr = pkc->nxt_offset; 1065 pkc->skb = skb; 1066 end = (char *)pbd + pkc->kblk_size; 1067 1068 /* first try the current block */ 1069 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1070 prb_fill_curr_block(curr, pkc, pbd, len); 1071 return (void *)curr; 1072 } 1073 1074 /* Ok, close the current block */ 1075 prb_retire_current_block(pkc, po, 0); 1076 1077 /* Now, try to dispatch the next block */ 1078 curr = (char *)prb_dispatch_next_block(pkc, po); 1079 if (curr) { 1080 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1081 prb_fill_curr_block(curr, pkc, pbd, len); 1082 return (void *)curr; 1083 } 1084 1085 /* 1086 * No free blocks are available.user_space hasn't caught up yet. 1087 * Queue was just frozen and now this packet will get dropped. 1088 */ 1089 return NULL; 1090 } 1091 1092 static void *packet_current_rx_frame(struct packet_sock *po, 1093 struct sk_buff *skb, 1094 int status, unsigned int len) 1095 { 1096 char *curr = NULL; 1097 switch (po->tp_version) { 1098 case TPACKET_V1: 1099 case TPACKET_V2: 1100 curr = packet_lookup_frame(po, &po->rx_ring, 1101 po->rx_ring.head, status); 1102 return curr; 1103 case TPACKET_V3: 1104 return __packet_lookup_frame_in_block(po, skb, status, len); 1105 default: 1106 WARN(1, "TPACKET version not supported\n"); 1107 BUG(); 1108 return NULL; 1109 } 1110 } 1111 1112 static void *prb_lookup_block(struct packet_sock *po, 1113 struct packet_ring_buffer *rb, 1114 unsigned int idx, 1115 int status) 1116 { 1117 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1118 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1119 1120 if (status != BLOCK_STATUS(pbd)) 1121 return NULL; 1122 return pbd; 1123 } 1124 1125 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1126 { 1127 unsigned int prev; 1128 if (rb->prb_bdqc.kactive_blk_num) 1129 prev = rb->prb_bdqc.kactive_blk_num-1; 1130 else 1131 prev = rb->prb_bdqc.knum_blocks-1; 1132 return prev; 1133 } 1134 1135 /* Assumes caller has held the rx_queue.lock */ 1136 static void *__prb_previous_block(struct packet_sock *po, 1137 struct packet_ring_buffer *rb, 1138 int status) 1139 { 1140 unsigned int previous = prb_previous_blk_num(rb); 1141 return prb_lookup_block(po, rb, previous, status); 1142 } 1143 1144 static void *packet_previous_rx_frame(struct packet_sock *po, 1145 struct packet_ring_buffer *rb, 1146 int status) 1147 { 1148 if (po->tp_version <= TPACKET_V2) 1149 return packet_previous_frame(po, rb, status); 1150 1151 return __prb_previous_block(po, rb, status); 1152 } 1153 1154 static void packet_increment_rx_head(struct packet_sock *po, 1155 struct packet_ring_buffer *rb) 1156 { 1157 switch (po->tp_version) { 1158 case TPACKET_V1: 1159 case TPACKET_V2: 1160 return packet_increment_head(rb); 1161 case TPACKET_V3: 1162 default: 1163 WARN(1, "TPACKET version not supported.\n"); 1164 BUG(); 1165 return; 1166 } 1167 } 1168 1169 static void *packet_previous_frame(struct packet_sock *po, 1170 struct packet_ring_buffer *rb, 1171 int status) 1172 { 1173 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; 1174 return packet_lookup_frame(po, rb, previous, status); 1175 } 1176 1177 static void packet_increment_head(struct packet_ring_buffer *buff) 1178 { 1179 buff->head = buff->head != buff->frame_max ? 
buff->head+1 : 0; 1180 } 1181 1182 static void packet_inc_pending(struct packet_ring_buffer *rb) 1183 { 1184 this_cpu_inc(*rb->pending_refcnt); 1185 } 1186 1187 static void packet_dec_pending(struct packet_ring_buffer *rb) 1188 { 1189 this_cpu_dec(*rb->pending_refcnt); 1190 } 1191 1192 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1193 { 1194 unsigned int refcnt = 0; 1195 int cpu; 1196 1197 /* We don't use pending refcount in rx_ring. */ 1198 if (rb->pending_refcnt == NULL) 1199 return 0; 1200 1201 for_each_possible_cpu(cpu) 1202 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1203 1204 return refcnt; 1205 } 1206 1207 static int packet_alloc_pending(struct packet_sock *po) 1208 { 1209 po->rx_ring.pending_refcnt = NULL; 1210 1211 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1212 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1213 return -ENOBUFS; 1214 1215 return 0; 1216 } 1217 1218 static void packet_free_pending(struct packet_sock *po) 1219 { 1220 free_percpu(po->tx_ring.pending_refcnt); 1221 } 1222 1223 static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1224 { 1225 struct sock *sk = &po->sk; 1226 bool has_room; 1227 1228 if (po->prot_hook.func != tpacket_rcv) 1229 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize) 1230 <= sk->sk_rcvbuf; 1231 1232 spin_lock(&sk->sk_receive_queue.lock); 1233 if (po->tp_version == TPACKET_V3) 1234 has_room = prb_lookup_block(po, &po->rx_ring, 1235 po->rx_ring.prb_bdqc.kactive_blk_num, 1236 TP_STATUS_KERNEL); 1237 else 1238 has_room = packet_lookup_frame(po, &po->rx_ring, 1239 po->rx_ring.head, 1240 TP_STATUS_KERNEL); 1241 spin_unlock(&sk->sk_receive_queue.lock); 1242 1243 return has_room; 1244 } 1245 1246 static void packet_sock_destruct(struct sock *sk) 1247 { 1248 skb_queue_purge(&sk->sk_error_queue); 1249 1250 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1251 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 1252 1253 if (!sock_flag(sk, SOCK_DEAD)) { 1254 pr_err("Attempt to release alive packet socket: %p\n", sk); 1255 return; 1256 } 1257 1258 sk_refcnt_debug_dec(sk); 1259 } 1260 1261 static int fanout_rr_next(struct packet_fanout *f, unsigned int num) 1262 { 1263 int x = atomic_read(&f->rr_cur) + 1; 1264 1265 if (x >= num) 1266 x = 0; 1267 1268 return x; 1269 } 1270 1271 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1272 struct sk_buff *skb, 1273 unsigned int num) 1274 { 1275 return reciprocal_scale(skb_get_hash(skb), num); 1276 } 1277 1278 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1279 struct sk_buff *skb, 1280 unsigned int num) 1281 { 1282 int cur, old; 1283 1284 cur = atomic_read(&f->rr_cur); 1285 while ((old = atomic_cmpxchg(&f->rr_cur, cur, 1286 fanout_rr_next(f, num))) != cur) 1287 cur = old; 1288 return cur; 1289 } 1290 1291 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1292 struct sk_buff *skb, 1293 unsigned int num) 1294 { 1295 return smp_processor_id() % num; 1296 } 1297 1298 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1299 struct sk_buff *skb, 1300 unsigned int num) 1301 { 1302 return prandom_u32_max(num); 1303 } 1304 1305 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1306 struct sk_buff *skb, 1307 unsigned int idx, unsigned int skip, 1308 unsigned int num) 1309 { 1310 unsigned int i, j; 1311 1312 i = j = min_t(int, f->next[idx], num - 1); 1313 do { 1314 if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) { 1315 if (i != j) 1316 f->next[idx] = i; 1317 return i; 1318 } 1319 if 
(++i == num) 1320 i = 0; 1321 } while (i != j); 1322 1323 return idx; 1324 } 1325 1326 static unsigned int fanout_demux_qm(struct packet_fanout *f, 1327 struct sk_buff *skb, 1328 unsigned int num) 1329 { 1330 return skb_get_queue_mapping(skb) % num; 1331 } 1332 1333 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1334 { 1335 return f->flags & (flag >> 8); 1336 } 1337 1338 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1339 struct packet_type *pt, struct net_device *orig_dev) 1340 { 1341 struct packet_fanout *f = pt->af_packet_priv; 1342 unsigned int num = f->num_members; 1343 struct packet_sock *po; 1344 unsigned int idx; 1345 1346 if (!net_eq(dev_net(dev), read_pnet(&f->net)) || 1347 !num) { 1348 kfree_skb(skb); 1349 return 0; 1350 } 1351 1352 switch (f->type) { 1353 case PACKET_FANOUT_HASH: 1354 default: 1355 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1356 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); 1357 if (!skb) 1358 return 0; 1359 } 1360 idx = fanout_demux_hash(f, skb, num); 1361 break; 1362 case PACKET_FANOUT_LB: 1363 idx = fanout_demux_lb(f, skb, num); 1364 break; 1365 case PACKET_FANOUT_CPU: 1366 idx = fanout_demux_cpu(f, skb, num); 1367 break; 1368 case PACKET_FANOUT_RND: 1369 idx = fanout_demux_rnd(f, skb, num); 1370 break; 1371 case PACKET_FANOUT_QM: 1372 idx = fanout_demux_qm(f, skb, num); 1373 break; 1374 case PACKET_FANOUT_ROLLOVER: 1375 idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num); 1376 break; 1377 } 1378 1379 po = pkt_sk(f->arr[idx]); 1380 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) && 1381 unlikely(!packet_rcv_has_room(po, skb))) { 1382 idx = fanout_demux_rollover(f, skb, idx, idx, num); 1383 po = pkt_sk(f->arr[idx]); 1384 } 1385 1386 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1387 } 1388 1389 DEFINE_MUTEX(fanout_mutex); 1390 EXPORT_SYMBOL_GPL(fanout_mutex); 1391 static LIST_HEAD(fanout_list); 1392 1393 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1394 { 1395 struct packet_fanout *f = po->fanout; 1396 1397 spin_lock(&f->lock); 1398 f->arr[f->num_members] = sk; 1399 smp_wmb(); 1400 f->num_members++; 1401 spin_unlock(&f->lock); 1402 } 1403 1404 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1405 { 1406 struct packet_fanout *f = po->fanout; 1407 int i; 1408 1409 spin_lock(&f->lock); 1410 for (i = 0; i < f->num_members; i++) { 1411 if (f->arr[i] == sk) 1412 break; 1413 } 1414 BUG_ON(i >= f->num_members); 1415 f->arr[i] = f->arr[f->num_members - 1]; 1416 f->num_members--; 1417 spin_unlock(&f->lock); 1418 } 1419 1420 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1421 { 1422 if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout) 1423 return true; 1424 1425 return false; 1426 } 1427 1428 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1429 { 1430 struct packet_sock *po = pkt_sk(sk); 1431 struct packet_fanout *f, *match; 1432 u8 type = type_flags & 0xff; 1433 u8 flags = type_flags >> 8; 1434 int err; 1435 1436 switch (type) { 1437 case PACKET_FANOUT_ROLLOVER: 1438 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1439 return -EINVAL; 1440 case PACKET_FANOUT_HASH: 1441 case PACKET_FANOUT_LB: 1442 case PACKET_FANOUT_CPU: 1443 case PACKET_FANOUT_RND: 1444 case PACKET_FANOUT_QM: 1445 break; 1446 default: 1447 return -EINVAL; 1448 } 1449 1450 if (!po->running) 1451 return -EINVAL; 1452 1453 if (po->fanout) 1454 return -EALREADY; 1455 1456 mutex_lock(&fanout_mutex); 1457 match = NULL; 
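	/* Look for an existing fanout group with this id in our network
	 * namespace; if none is found, a new group with its own
	 * packet_rcv_fanout prot_hook is allocated and registered below.
	 */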
1458 list_for_each_entry(f, &fanout_list, list) { 1459 if (f->id == id && 1460 read_pnet(&f->net) == sock_net(sk)) { 1461 match = f; 1462 break; 1463 } 1464 } 1465 err = -EINVAL; 1466 if (match && match->flags != flags) 1467 goto out; 1468 if (!match) { 1469 err = -ENOMEM; 1470 match = kzalloc(sizeof(*match), GFP_KERNEL); 1471 if (!match) 1472 goto out; 1473 write_pnet(&match->net, sock_net(sk)); 1474 match->id = id; 1475 match->type = type; 1476 match->flags = flags; 1477 atomic_set(&match->rr_cur, 0); 1478 INIT_LIST_HEAD(&match->list); 1479 spin_lock_init(&match->lock); 1480 atomic_set(&match->sk_ref, 0); 1481 match->prot_hook.type = po->prot_hook.type; 1482 match->prot_hook.dev = po->prot_hook.dev; 1483 match->prot_hook.func = packet_rcv_fanout; 1484 match->prot_hook.af_packet_priv = match; 1485 match->prot_hook.id_match = match_fanout_group; 1486 dev_add_pack(&match->prot_hook); 1487 list_add(&match->list, &fanout_list); 1488 } 1489 err = -EINVAL; 1490 if (match->type == type && 1491 match->prot_hook.type == po->prot_hook.type && 1492 match->prot_hook.dev == po->prot_hook.dev) { 1493 err = -ENOSPC; 1494 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { 1495 __dev_remove_pack(&po->prot_hook); 1496 po->fanout = match; 1497 atomic_inc(&match->sk_ref); 1498 __fanout_link(sk, po); 1499 err = 0; 1500 } 1501 } 1502 out: 1503 mutex_unlock(&fanout_mutex); 1504 return err; 1505 } 1506 1507 static void fanout_release(struct sock *sk) 1508 { 1509 struct packet_sock *po = pkt_sk(sk); 1510 struct packet_fanout *f; 1511 1512 f = po->fanout; 1513 if (!f) 1514 return; 1515 1516 mutex_lock(&fanout_mutex); 1517 po->fanout = NULL; 1518 1519 if (atomic_dec_and_test(&f->sk_ref)) { 1520 list_del(&f->list); 1521 dev_remove_pack(&f->prot_hook); 1522 kfree(f); 1523 } 1524 mutex_unlock(&fanout_mutex); 1525 } 1526 1527 static const struct proto_ops packet_ops; 1528 1529 static const struct proto_ops packet_ops_spkt; 1530 1531 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1532 struct packet_type *pt, struct net_device *orig_dev) 1533 { 1534 struct sock *sk; 1535 struct sockaddr_pkt *spkt; 1536 1537 /* 1538 * When we registered the protocol we saved the socket in the data 1539 * field for just this event. 1540 */ 1541 1542 sk = pt->af_packet_priv; 1543 1544 /* 1545 * Yank back the headers [hope the device set this 1546 * right or kerboom...] 1547 * 1548 * Incoming packets have ll header pulled, 1549 * push it back. 1550 * 1551 * For outgoing ones skb->data == skb_mac_header(skb) 1552 * so that this procedure is noop. 1553 */ 1554 1555 if (skb->pkt_type == PACKET_LOOPBACK) 1556 goto out; 1557 1558 if (!net_eq(dev_net(dev), sock_net(sk))) 1559 goto out; 1560 1561 skb = skb_share_check(skb, GFP_ATOMIC); 1562 if (skb == NULL) 1563 goto oom; 1564 1565 /* drop any routing info */ 1566 skb_dst_drop(skb); 1567 1568 /* drop conntrack reference */ 1569 nf_reset(skb); 1570 1571 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1572 1573 skb_push(skb, skb->data - skb_mac_header(skb)); 1574 1575 /* 1576 * The SOCK_PACKET socket receives _all_ frames. 1577 */ 1578 1579 spkt->spkt_family = dev->type; 1580 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1581 spkt->spkt_protocol = skb->protocol; 1582 1583 /* 1584 * Charge the memory to the socket. This is done specifically 1585 * to prevent sockets using all the memory up. 
1586 */ 1587 1588 if (sock_queue_rcv_skb(sk, skb) == 0) 1589 return 0; 1590 1591 out: 1592 kfree_skb(skb); 1593 oom: 1594 return 0; 1595 } 1596 1597 1598 /* 1599 * Output a raw packet to a device layer. This bypasses all the other 1600 * protocol layers and you must therefore supply it with a complete frame 1601 */ 1602 1603 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, 1604 struct msghdr *msg, size_t len) 1605 { 1606 struct sock *sk = sock->sk; 1607 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1608 struct sk_buff *skb = NULL; 1609 struct net_device *dev; 1610 __be16 proto = 0; 1611 int err; 1612 int extra_len = 0; 1613 1614 /* 1615 * Get and verify the address. 1616 */ 1617 1618 if (saddr) { 1619 if (msg->msg_namelen < sizeof(struct sockaddr)) 1620 return -EINVAL; 1621 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1622 proto = saddr->spkt_protocol; 1623 } else 1624 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1625 1626 /* 1627 * Find the device first to size check it 1628 */ 1629 1630 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1631 retry: 1632 rcu_read_lock(); 1633 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1634 err = -ENODEV; 1635 if (dev == NULL) 1636 goto out_unlock; 1637 1638 err = -ENETDOWN; 1639 if (!(dev->flags & IFF_UP)) 1640 goto out_unlock; 1641 1642 /* 1643 * You may not queue a frame bigger than the mtu. This is the lowest level 1644 * raw protocol and you must do your own fragmentation at this level. 1645 */ 1646 1647 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1648 if (!netif_supports_nofcs(dev)) { 1649 err = -EPROTONOSUPPORT; 1650 goto out_unlock; 1651 } 1652 extra_len = 4; /* We're doing our own CRC */ 1653 } 1654 1655 err = -EMSGSIZE; 1656 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 1657 goto out_unlock; 1658 1659 if (!skb) { 1660 size_t reserved = LL_RESERVED_SPACE(dev); 1661 int tlen = dev->needed_tailroom; 1662 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 1663 1664 rcu_read_unlock(); 1665 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 1666 if (skb == NULL) 1667 return -ENOBUFS; 1668 /* FIXME: Save some space for broken drivers that write a hard 1669 * header at transmission time by themselves. PPP is the notable 1670 * one here. This should really be fixed at the driver level. 1671 */ 1672 skb_reserve(skb, reserved); 1673 skb_reset_network_header(skb); 1674 1675 /* Try to align data part correctly */ 1676 if (hhlen) { 1677 skb->data -= hhlen; 1678 skb->tail -= hhlen; 1679 if (len < hhlen) 1680 skb_reset_network_header(skb); 1681 } 1682 err = memcpy_from_msg(skb_put(skb, len), msg, len); 1683 if (err) 1684 goto out_free; 1685 goto retry; 1686 } 1687 1688 if (len > (dev->mtu + dev->hard_header_len + extra_len)) { 1689 /* Earlier code assumed this would be a VLAN pkt, 1690 * double-check this now that we have the actual 1691 * packet in hand. 
1692 */ 1693 struct ethhdr *ehdr; 1694 skb_reset_mac_header(skb); 1695 ehdr = eth_hdr(skb); 1696 if (ehdr->h_proto != htons(ETH_P_8021Q)) { 1697 err = -EMSGSIZE; 1698 goto out_unlock; 1699 } 1700 } 1701 1702 skb->protocol = proto; 1703 skb->dev = dev; 1704 skb->priority = sk->sk_priority; 1705 skb->mark = sk->sk_mark; 1706 1707 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); 1708 1709 if (unlikely(extra_len == 4)) 1710 skb->no_fcs = 1; 1711 1712 skb_probe_transport_header(skb, 0); 1713 1714 dev_queue_xmit(skb); 1715 rcu_read_unlock(); 1716 return len; 1717 1718 out_unlock: 1719 rcu_read_unlock(); 1720 out_free: 1721 kfree_skb(skb); 1722 return err; 1723 } 1724 1725 static unsigned int run_filter(const struct sk_buff *skb, 1726 const struct sock *sk, 1727 unsigned int res) 1728 { 1729 struct sk_filter *filter; 1730 1731 rcu_read_lock(); 1732 filter = rcu_dereference(sk->sk_filter); 1733 if (filter != NULL) 1734 res = SK_RUN_FILTER(filter, skb); 1735 rcu_read_unlock(); 1736 1737 return res; 1738 } 1739 1740 /* 1741 * This function makes lazy skb cloning in hope that most of packets 1742 * are discarded by BPF. 1743 * 1744 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 1745 * and skb->cb are mangled. It works because (and until) packets 1746 * falling here are owned by current CPU. Output packets are cloned 1747 * by dev_queue_xmit_nit(), input packets are processed by net_bh 1748 * sequencially, so that if we return skb to original state on exit, 1749 * we will not harm anyone. 1750 */ 1751 1752 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 1753 struct packet_type *pt, struct net_device *orig_dev) 1754 { 1755 struct sock *sk; 1756 struct sockaddr_ll *sll; 1757 struct packet_sock *po; 1758 u8 *skb_head = skb->data; 1759 int skb_len = skb->len; 1760 unsigned int snaplen, res; 1761 1762 if (skb->pkt_type == PACKET_LOOPBACK) 1763 goto drop; 1764 1765 sk = pt->af_packet_priv; 1766 po = pkt_sk(sk); 1767 1768 if (!net_eq(dev_net(dev), sock_net(sk))) 1769 goto drop; 1770 1771 skb->dev = dev; 1772 1773 if (dev->header_ops) { 1774 /* The device has an explicit notion of ll header, 1775 * exported to higher levels. 1776 * 1777 * Otherwise, the device hides details of its frame 1778 * structure, so that corresponding packet head is 1779 * never delivered to user. 
1780 */ 1781 if (sk->sk_type != SOCK_DGRAM) 1782 skb_push(skb, skb->data - skb_mac_header(skb)); 1783 else if (skb->pkt_type == PACKET_OUTGOING) { 1784 /* Special case: outgoing packets have ll header at head */ 1785 skb_pull(skb, skb_network_offset(skb)); 1786 } 1787 } 1788 1789 snaplen = skb->len; 1790 1791 res = run_filter(skb, sk, snaplen); 1792 if (!res) 1793 goto drop_n_restore; 1794 if (snaplen > res) 1795 snaplen = res; 1796 1797 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 1798 goto drop_n_acct; 1799 1800 if (skb_shared(skb)) { 1801 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1802 if (nskb == NULL) 1803 goto drop_n_acct; 1804 1805 if (skb_head != skb->data) { 1806 skb->data = skb_head; 1807 skb->len = skb_len; 1808 } 1809 consume_skb(skb); 1810 skb = nskb; 1811 } 1812 1813 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 > 1814 sizeof(skb->cb)); 1815 1816 sll = &PACKET_SKB_CB(skb)->sa.ll; 1817 sll->sll_family = AF_PACKET; 1818 sll->sll_hatype = dev->type; 1819 sll->sll_protocol = skb->protocol; 1820 sll->sll_pkttype = skb->pkt_type; 1821 if (unlikely(po->origdev)) 1822 sll->sll_ifindex = orig_dev->ifindex; 1823 else 1824 sll->sll_ifindex = dev->ifindex; 1825 1826 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 1827 1828 PACKET_SKB_CB(skb)->origlen = skb->len; 1829 1830 if (pskb_trim(skb, snaplen)) 1831 goto drop_n_acct; 1832 1833 skb_set_owner_r(skb, sk); 1834 skb->dev = NULL; 1835 skb_dst_drop(skb); 1836 1837 /* drop conntrack reference */ 1838 nf_reset(skb); 1839 1840 spin_lock(&sk->sk_receive_queue.lock); 1841 po->stats.stats1.tp_packets++; 1842 skb->dropcount = atomic_read(&sk->sk_drops); 1843 __skb_queue_tail(&sk->sk_receive_queue, skb); 1844 spin_unlock(&sk->sk_receive_queue.lock); 1845 sk->sk_data_ready(sk); 1846 return 0; 1847 1848 drop_n_acct: 1849 spin_lock(&sk->sk_receive_queue.lock); 1850 po->stats.stats1.tp_drops++; 1851 atomic_inc(&sk->sk_drops); 1852 spin_unlock(&sk->sk_receive_queue.lock); 1853 1854 drop_n_restore: 1855 if (skb_head != skb->data && skb_shared(skb)) { 1856 skb->data = skb_head; 1857 skb->len = skb_len; 1858 } 1859 drop: 1860 consume_skb(skb); 1861 return 0; 1862 } 1863 1864 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 1865 struct packet_type *pt, struct net_device *orig_dev) 1866 { 1867 struct sock *sk; 1868 struct packet_sock *po; 1869 struct sockaddr_ll *sll; 1870 union tpacket_uhdr h; 1871 u8 *skb_head = skb->data; 1872 int skb_len = skb->len; 1873 unsigned int snaplen, res; 1874 unsigned long status = TP_STATUS_USER; 1875 unsigned short macoff, netoff, hdrlen; 1876 struct sk_buff *copy_skb = NULL; 1877 struct timespec ts; 1878 __u32 ts_status; 1879 1880 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 1881 * We may add members to them until current aligned size without forcing 1882 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
1883 */ 1884 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 1885 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 1886 1887 if (skb->pkt_type == PACKET_LOOPBACK) 1888 goto drop; 1889 1890 sk = pt->af_packet_priv; 1891 po = pkt_sk(sk); 1892 1893 if (!net_eq(dev_net(dev), sock_net(sk))) 1894 goto drop; 1895 1896 if (dev->header_ops) { 1897 if (sk->sk_type != SOCK_DGRAM) 1898 skb_push(skb, skb->data - skb_mac_header(skb)); 1899 else if (skb->pkt_type == PACKET_OUTGOING) { 1900 /* Special case: outgoing packets have ll header at head */ 1901 skb_pull(skb, skb_network_offset(skb)); 1902 } 1903 } 1904 1905 if (skb->ip_summed == CHECKSUM_PARTIAL) 1906 status |= TP_STATUS_CSUMNOTREADY; 1907 1908 snaplen = skb->len; 1909 1910 res = run_filter(skb, sk, snaplen); 1911 if (!res) 1912 goto drop_n_restore; 1913 if (snaplen > res) 1914 snaplen = res; 1915 1916 if (sk->sk_type == SOCK_DGRAM) { 1917 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 1918 po->tp_reserve; 1919 } else { 1920 unsigned int maclen = skb_network_offset(skb); 1921 netoff = TPACKET_ALIGN(po->tp_hdrlen + 1922 (maclen < 16 ? 16 : maclen)) + 1923 po->tp_reserve; 1924 macoff = netoff - maclen; 1925 } 1926 if (po->tp_version <= TPACKET_V2) { 1927 if (macoff + snaplen > po->rx_ring.frame_size) { 1928 if (po->copy_thresh && 1929 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 1930 if (skb_shared(skb)) { 1931 copy_skb = skb_clone(skb, GFP_ATOMIC); 1932 } else { 1933 copy_skb = skb_get(skb); 1934 skb_head = skb->data; 1935 } 1936 if (copy_skb) 1937 skb_set_owner_r(copy_skb, sk); 1938 } 1939 snaplen = po->rx_ring.frame_size - macoff; 1940 if ((int)snaplen < 0) 1941 snaplen = 0; 1942 } 1943 } else if (unlikely(macoff + snaplen > 1944 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 1945 u32 nval; 1946 1947 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 1948 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 1949 snaplen, nval, macoff); 1950 snaplen = nval; 1951 if (unlikely((int)snaplen < 0)) { 1952 snaplen = 0; 1953 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 1954 } 1955 } 1956 spin_lock(&sk->sk_receive_queue.lock); 1957 h.raw = packet_current_rx_frame(po, skb, 1958 TP_STATUS_KERNEL, (macoff+snaplen)); 1959 if (!h.raw) 1960 goto ring_is_full; 1961 if (po->tp_version <= TPACKET_V2) { 1962 packet_increment_rx_head(po, &po->rx_ring); 1963 /* 1964 * LOSING will be reported till you read the stats, 1965 * because it's COR - Clear On Read. 1966 * Anyways, moving it for V1/V2 only as V3 doesn't need this 1967 * at packet level. 
1968 */ 1969 if (po->stats.stats1.tp_drops) 1970 status |= TP_STATUS_LOSING; 1971 } 1972 po->stats.stats1.tp_packets++; 1973 if (copy_skb) { 1974 status |= TP_STATUS_COPY; 1975 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 1976 } 1977 spin_unlock(&sk->sk_receive_queue.lock); 1978 1979 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 1980 1981 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 1982 getnstimeofday(&ts); 1983 1984 status |= ts_status; 1985 1986 switch (po->tp_version) { 1987 case TPACKET_V1: 1988 h.h1->tp_len = skb->len; 1989 h.h1->tp_snaplen = snaplen; 1990 h.h1->tp_mac = macoff; 1991 h.h1->tp_net = netoff; 1992 h.h1->tp_sec = ts.tv_sec; 1993 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 1994 hdrlen = sizeof(*h.h1); 1995 break; 1996 case TPACKET_V2: 1997 h.h2->tp_len = skb->len; 1998 h.h2->tp_snaplen = snaplen; 1999 h.h2->tp_mac = macoff; 2000 h.h2->tp_net = netoff; 2001 h.h2->tp_sec = ts.tv_sec; 2002 h.h2->tp_nsec = ts.tv_nsec; 2003 if (vlan_tx_tag_present(skb)) { 2004 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); 2005 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2006 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2007 } else { 2008 h.h2->tp_vlan_tci = 0; 2009 h.h2->tp_vlan_tpid = 0; 2010 } 2011 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2012 hdrlen = sizeof(*h.h2); 2013 break; 2014 case TPACKET_V3: 2015 /* tp_nxt_offset,vlan are already populated above. 2016 * So DONT clear those fields here 2017 */ 2018 h.h3->tp_status |= status; 2019 h.h3->tp_len = skb->len; 2020 h.h3->tp_snaplen = snaplen; 2021 h.h3->tp_mac = macoff; 2022 h.h3->tp_net = netoff; 2023 h.h3->tp_sec = ts.tv_sec; 2024 h.h3->tp_nsec = ts.tv_nsec; 2025 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2026 hdrlen = sizeof(*h.h3); 2027 break; 2028 default: 2029 BUG(); 2030 } 2031 2032 sll = h.raw + TPACKET_ALIGN(hdrlen); 2033 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2034 sll->sll_family = AF_PACKET; 2035 sll->sll_hatype = dev->type; 2036 sll->sll_protocol = skb->protocol; 2037 sll->sll_pkttype = skb->pkt_type; 2038 if (unlikely(po->origdev)) 2039 sll->sll_ifindex = orig_dev->ifindex; 2040 else 2041 sll->sll_ifindex = dev->ifindex; 2042 2043 smp_mb(); 2044 2045 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2046 if (po->tp_version <= TPACKET_V2) { 2047 u8 *start, *end; 2048 2049 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2050 macoff + snaplen); 2051 2052 for (start = h.raw; start < end; start += PAGE_SIZE) 2053 flush_dcache_page(pgv_to_page(start)); 2054 } 2055 smp_wmb(); 2056 #endif 2057 2058 if (po->tp_version <= TPACKET_V2) { 2059 __packet_set_status(po, h.raw, status); 2060 sk->sk_data_ready(sk); 2061 } else { 2062 prb_clear_blk_fill_status(&po->rx_ring); 2063 } 2064 2065 drop_n_restore: 2066 if (skb_head != skb->data && skb_shared(skb)) { 2067 skb->data = skb_head; 2068 skb->len = skb_len; 2069 } 2070 drop: 2071 kfree_skb(skb); 2072 return 0; 2073 2074 ring_is_full: 2075 po->stats.stats1.tp_drops++; 2076 spin_unlock(&sk->sk_receive_queue.lock); 2077 2078 sk->sk_data_ready(sk); 2079 kfree_skb(copy_skb); 2080 goto drop_n_restore; 2081 } 2082 2083 static void tpacket_destruct_skb(struct sk_buff *skb) 2084 { 2085 struct packet_sock *po = pkt_sk(skb->sk); 2086 2087 if (likely(po->tx_ring.pg_vec)) { 2088 void *ph; 2089 __u32 ts; 2090 2091 ph = skb_shinfo(skb)->destructor_arg; 2092 packet_dec_pending(&po->tx_ring); 2093 2094 ts = __packet_set_timestamp(po, ph, skb); 2095 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2096 } 2097 2098 
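	/* Release the send-buffer accounting taken when this skb was
	 * allocated; sock_wfree() drops the sk_wmem_alloc charge and
	 * signals write space to any waiting sender.
	 */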
sock_wfree(skb); 2099 } 2100 2101 static bool ll_header_truncated(const struct net_device *dev, int len) 2102 { 2103 /* net device doesn't like empty head */ 2104 if (unlikely(len <= dev->hard_header_len)) { 2105 net_warn_ratelimited("%s: packet size is too short (%d < %d)\n", 2106 current->comm, len, dev->hard_header_len); 2107 return true; 2108 } 2109 2110 return false; 2111 } 2112 2113 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2114 void *frame, struct net_device *dev, int size_max, 2115 __be16 proto, unsigned char *addr, int hlen) 2116 { 2117 union tpacket_uhdr ph; 2118 int to_write, offset, len, tp_len, nr_frags, len_max; 2119 struct socket *sock = po->sk.sk_socket; 2120 struct page *page; 2121 void *data; 2122 int err; 2123 2124 ph.raw = frame; 2125 2126 skb->protocol = proto; 2127 skb->dev = dev; 2128 skb->priority = po->sk.sk_priority; 2129 skb->mark = po->sk.sk_mark; 2130 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); 2131 skb_shinfo(skb)->destructor_arg = ph.raw; 2132 2133 switch (po->tp_version) { 2134 case TPACKET_V2: 2135 tp_len = ph.h2->tp_len; 2136 break; 2137 default: 2138 tp_len = ph.h1->tp_len; 2139 break; 2140 } 2141 if (unlikely(tp_len > size_max)) { 2142 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2143 return -EMSGSIZE; 2144 } 2145 2146 skb_reserve(skb, hlen); 2147 skb_reset_network_header(skb); 2148 2149 if (!packet_use_direct_xmit(po)) 2150 skb_probe_transport_header(skb, 0); 2151 if (unlikely(po->tp_tx_has_off)) { 2152 int off_min, off_max, off; 2153 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2154 off_max = po->tx_ring.frame_size - tp_len; 2155 if (sock->type == SOCK_DGRAM) { 2156 switch (po->tp_version) { 2157 case TPACKET_V2: 2158 off = ph.h2->tp_net; 2159 break; 2160 default: 2161 off = ph.h1->tp_net; 2162 break; 2163 } 2164 } else { 2165 switch (po->tp_version) { 2166 case TPACKET_V2: 2167 off = ph.h2->tp_mac; 2168 break; 2169 default: 2170 off = ph.h1->tp_mac; 2171 break; 2172 } 2173 } 2174 if (unlikely((off < off_min) || (off_max < off))) 2175 return -EINVAL; 2176 data = ph.raw + off; 2177 } else { 2178 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); 2179 } 2180 to_write = tp_len; 2181 2182 if (sock->type == SOCK_DGRAM) { 2183 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2184 NULL, tp_len); 2185 if (unlikely(err < 0)) 2186 return -EINVAL; 2187 } else if (dev->hard_header_len) { 2188 if (ll_header_truncated(dev, tp_len)) 2189 return -EINVAL; 2190 2191 skb_push(skb, dev->hard_header_len); 2192 err = skb_store_bits(skb, 0, data, 2193 dev->hard_header_len); 2194 if (unlikely(err)) 2195 return err; 2196 2197 data += dev->hard_header_len; 2198 to_write -= dev->hard_header_len; 2199 } 2200 2201 offset = offset_in_page(data); 2202 len_max = PAGE_SIZE - offset; 2203 len = ((to_write > len_max) ? len_max : to_write); 2204 2205 skb->data_len = to_write; 2206 skb->len += to_write; 2207 skb->truesize += to_write; 2208 atomic_add(to_write, &po->sk.sk_wmem_alloc); 2209 2210 while (likely(to_write)) { 2211 nr_frags = skb_shinfo(skb)->nr_frags; 2212 2213 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2214 pr_err("Packet exceed the number of skb frags(%lu)\n", 2215 MAX_SKB_FRAGS); 2216 return -EFAULT; 2217 } 2218 2219 page = pgv_to_page(data); 2220 data += len; 2221 flush_dcache_page(page); 2222 get_page(page); 2223 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2224 to_write -= len; 2225 offset = 0; 2226 len_max = PAGE_SIZE; 2227 len = ((to_write > len_max) ? 
len_max : to_write); 2228 } 2229 2230 return tp_len; 2231 } 2232 2233 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2234 { 2235 struct sk_buff *skb; 2236 struct net_device *dev; 2237 __be16 proto; 2238 int err, reserve = 0; 2239 void *ph; 2240 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2241 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2242 int tp_len, size_max; 2243 unsigned char *addr; 2244 int len_sum = 0; 2245 int status = TP_STATUS_AVAILABLE; 2246 int hlen, tlen; 2247 2248 mutex_lock(&po->pg_vec_lock); 2249 2250 if (likely(saddr == NULL)) { 2251 dev = packet_cached_dev_get(po); 2252 proto = po->num; 2253 addr = NULL; 2254 } else { 2255 err = -EINVAL; 2256 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2257 goto out; 2258 if (msg->msg_namelen < (saddr->sll_halen 2259 + offsetof(struct sockaddr_ll, 2260 sll_addr))) 2261 goto out; 2262 proto = saddr->sll_protocol; 2263 addr = saddr->sll_addr; 2264 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2265 } 2266 2267 err = -ENXIO; 2268 if (unlikely(dev == NULL)) 2269 goto out; 2270 err = -ENETDOWN; 2271 if (unlikely(!(dev->flags & IFF_UP))) 2272 goto out_put; 2273 2274 reserve = dev->hard_header_len + VLAN_HLEN; 2275 size_max = po->tx_ring.frame_size 2276 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2277 2278 if (size_max > dev->mtu + reserve) 2279 size_max = dev->mtu + reserve; 2280 2281 do { 2282 ph = packet_current_frame(po, &po->tx_ring, 2283 TP_STATUS_SEND_REQUEST); 2284 if (unlikely(ph == NULL)) { 2285 if (need_wait && need_resched()) 2286 schedule(); 2287 continue; 2288 } 2289 2290 status = TP_STATUS_SEND_REQUEST; 2291 hlen = LL_RESERVED_SPACE(dev); 2292 tlen = dev->needed_tailroom; 2293 skb = sock_alloc_send_skb(&po->sk, 2294 hlen + tlen + sizeof(struct sockaddr_ll), 2295 0, &err); 2296 2297 if (unlikely(skb == NULL)) 2298 goto out_status; 2299 2300 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2301 addr, hlen); 2302 if (tp_len > dev->mtu + dev->hard_header_len) { 2303 struct ethhdr *ehdr; 2304 /* Earlier code assumed this would be a VLAN pkt, 2305 * double-check this now that we have the actual 2306 * packet in hand. 
2307 */ 2308 2309 skb_reset_mac_header(skb); 2310 ehdr = eth_hdr(skb); 2311 if (ehdr->h_proto != htons(ETH_P_8021Q)) 2312 tp_len = -EMSGSIZE; 2313 } 2314 if (unlikely(tp_len < 0)) { 2315 if (po->tp_loss) { 2316 __packet_set_status(po, ph, 2317 TP_STATUS_AVAILABLE); 2318 packet_increment_head(&po->tx_ring); 2319 kfree_skb(skb); 2320 continue; 2321 } else { 2322 status = TP_STATUS_WRONG_FORMAT; 2323 err = tp_len; 2324 goto out_status; 2325 } 2326 } 2327 2328 packet_pick_tx_queue(dev, skb); 2329 2330 skb->destructor = tpacket_destruct_skb; 2331 __packet_set_status(po, ph, TP_STATUS_SENDING); 2332 packet_inc_pending(&po->tx_ring); 2333 2334 status = TP_STATUS_SEND_REQUEST; 2335 err = po->xmit(skb); 2336 if (unlikely(err > 0)) { 2337 err = net_xmit_errno(err); 2338 if (err && __packet_get_status(po, ph) == 2339 TP_STATUS_AVAILABLE) { 2340 /* skb was destructed already */ 2341 skb = NULL; 2342 goto out_status; 2343 } 2344 /* 2345 * skb was dropped but not destructed yet; 2346 * let's treat it like congestion or err < 0 2347 */ 2348 err = 0; 2349 } 2350 packet_increment_head(&po->tx_ring); 2351 len_sum += tp_len; 2352 } while (likely((ph != NULL) || 2353 /* Note: packet_read_pending() might be slow if we have 2354 * to call it as it's per_cpu variable, but in fast-path 2355 * we already short-circuit the loop with the first 2356 * condition, and luckily don't have to go that path 2357 * anyway. 2358 */ 2359 (need_wait && packet_read_pending(&po->tx_ring)))); 2360 2361 err = len_sum; 2362 goto out_put; 2363 2364 out_status: 2365 __packet_set_status(po, ph, status); 2366 kfree_skb(skb); 2367 out_put: 2368 dev_put(dev); 2369 out: 2370 mutex_unlock(&po->pg_vec_lock); 2371 return err; 2372 } 2373 2374 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2375 size_t reserve, size_t len, 2376 size_t linear, int noblock, 2377 int *err) 2378 { 2379 struct sk_buff *skb; 2380 2381 /* Under a page? Don't bother with paged skb. */ 2382 if (prepad + len < PAGE_SIZE || !linear) 2383 linear = len; 2384 2385 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2386 err, 0); 2387 if (!skb) 2388 return NULL; 2389 2390 skb_reserve(skb, reserve); 2391 skb_put(skb, linear); 2392 skb->data_len = len - linear; 2393 skb->len += len - linear; 2394 2395 return skb; 2396 } 2397 2398 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2399 { 2400 struct sock *sk = sock->sk; 2401 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2402 struct sk_buff *skb; 2403 struct net_device *dev; 2404 __be16 proto; 2405 unsigned char *addr; 2406 int err, reserve = 0; 2407 struct virtio_net_hdr vnet_hdr = { 0 }; 2408 int offset = 0; 2409 int vnet_hdr_len; 2410 struct packet_sock *po = pkt_sk(sk); 2411 unsigned short gso_type = 0; 2412 int hlen, tlen; 2413 int extra_len = 0; 2414 ssize_t n; 2415 2416 /* 2417 * Get and verify the address. 
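	 *	A sockaddr_ll passed in msg_name overrides the socket's bound
	 *	device and protocol for this packet only; otherwise the cached
	 *	bound device and po->num are used.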
2418 */ 2419 2420 if (likely(saddr == NULL)) { 2421 dev = packet_cached_dev_get(po); 2422 proto = po->num; 2423 addr = NULL; 2424 } else { 2425 err = -EINVAL; 2426 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2427 goto out; 2428 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2429 goto out; 2430 proto = saddr->sll_protocol; 2431 addr = saddr->sll_addr; 2432 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2433 } 2434 2435 err = -ENXIO; 2436 if (unlikely(dev == NULL)) 2437 goto out_unlock; 2438 err = -ENETDOWN; 2439 if (unlikely(!(dev->flags & IFF_UP))) 2440 goto out_unlock; 2441 2442 if (sock->type == SOCK_RAW) 2443 reserve = dev->hard_header_len; 2444 if (po->has_vnet_hdr) { 2445 vnet_hdr_len = sizeof(vnet_hdr); 2446 2447 err = -EINVAL; 2448 if (len < vnet_hdr_len) 2449 goto out_unlock; 2450 2451 len -= vnet_hdr_len; 2452 2453 err = -EFAULT; 2454 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter); 2455 if (n != vnet_hdr_len) 2456 goto out_unlock; 2457 2458 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2459 (__virtio16_to_cpu(false, vnet_hdr.csum_start) + 2460 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > 2461 __virtio16_to_cpu(false, vnet_hdr.hdr_len))) 2462 vnet_hdr.hdr_len = __cpu_to_virtio16(false, 2463 __virtio16_to_cpu(false, vnet_hdr.csum_start) + 2464 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); 2465 2466 err = -EINVAL; 2467 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) 2468 goto out_unlock; 2469 2470 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 2471 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 2472 case VIRTIO_NET_HDR_GSO_TCPV4: 2473 gso_type = SKB_GSO_TCPV4; 2474 break; 2475 case VIRTIO_NET_HDR_GSO_TCPV6: 2476 gso_type = SKB_GSO_TCPV6; 2477 break; 2478 case VIRTIO_NET_HDR_GSO_UDP: 2479 gso_type = SKB_GSO_UDP; 2480 break; 2481 default: 2482 goto out_unlock; 2483 } 2484 2485 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) 2486 gso_type |= SKB_GSO_TCP_ECN; 2487 2488 if (vnet_hdr.gso_size == 0) 2489 goto out_unlock; 2490 2491 } 2492 } 2493 2494 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2495 if (!netif_supports_nofcs(dev)) { 2496 err = -EPROTONOSUPPORT; 2497 goto out_unlock; 2498 } 2499 extra_len = 4; /* We're doing our own CRC */ 2500 } 2501 2502 err = -EMSGSIZE; 2503 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2504 goto out_unlock; 2505 2506 err = -ENOBUFS; 2507 hlen = LL_RESERVED_SPACE(dev); 2508 tlen = dev->needed_tailroom; 2509 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2510 __virtio16_to_cpu(false, vnet_hdr.hdr_len), 2511 msg->msg_flags & MSG_DONTWAIT, &err); 2512 if (skb == NULL) 2513 goto out_unlock; 2514 2515 skb_set_network_header(skb, reserve); 2516 2517 err = -EINVAL; 2518 if (sock->type == SOCK_DGRAM) { 2519 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2520 if (unlikely(offset < 0)) 2521 goto out_free; 2522 } else { 2523 if (ll_header_truncated(dev, len)) 2524 goto out_free; 2525 } 2526 2527 /* Returns -EFAULT on error */ 2528 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 2529 if (err) 2530 goto out_free; 2531 2532 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); 2533 2534 if (!gso_type && (len > dev->mtu + reserve + extra_len)) { 2535 /* Earlier code assumed this would be a VLAN pkt, 2536 * double-check this now that we have the actual 2537 * packet in hand. 
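		 * The earlier size check allowed VLAN_HLEN of slack, so only
		 * a frame that really carries an 802.1Q tag may exceed the
		 * plain MTU budget here.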
2538 */ 2539 struct ethhdr *ehdr; 2540 skb_reset_mac_header(skb); 2541 ehdr = eth_hdr(skb); 2542 if (ehdr->h_proto != htons(ETH_P_8021Q)) { 2543 err = -EMSGSIZE; 2544 goto out_free; 2545 } 2546 } 2547 2548 skb->protocol = proto; 2549 skb->dev = dev; 2550 skb->priority = sk->sk_priority; 2551 skb->mark = sk->sk_mark; 2552 2553 packet_pick_tx_queue(dev, skb); 2554 2555 if (po->has_vnet_hdr) { 2556 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 2557 u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); 2558 u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); 2559 if (!skb_partial_csum_set(skb, s, o)) { 2560 err = -EINVAL; 2561 goto out_free; 2562 } 2563 } 2564 2565 skb_shinfo(skb)->gso_size = 2566 __virtio16_to_cpu(false, vnet_hdr.gso_size); 2567 skb_shinfo(skb)->gso_type = gso_type; 2568 2569 /* Header must be checked, and gso_segs computed. */ 2570 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2571 skb_shinfo(skb)->gso_segs = 0; 2572 2573 len += vnet_hdr_len; 2574 } 2575 2576 if (!packet_use_direct_xmit(po)) 2577 skb_probe_transport_header(skb, reserve); 2578 if (unlikely(extra_len == 4)) 2579 skb->no_fcs = 1; 2580 2581 err = po->xmit(skb); 2582 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2583 goto out_unlock; 2584 2585 dev_put(dev); 2586 2587 return len; 2588 2589 out_free: 2590 kfree_skb(skb); 2591 out_unlock: 2592 if (dev) 2593 dev_put(dev); 2594 out: 2595 return err; 2596 } 2597 2598 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 2599 struct msghdr *msg, size_t len) 2600 { 2601 struct sock *sk = sock->sk; 2602 struct packet_sock *po = pkt_sk(sk); 2603 2604 if (po->tx_ring.pg_vec) 2605 return tpacket_snd(po, msg); 2606 else 2607 return packet_snd(sock, msg, len); 2608 } 2609 2610 /* 2611 * Close a PACKET socket. This is fairly simple. We immediately go 2612 * to 'closed' state and remove our protocol entry in the device list. 2613 */ 2614 2615 static int packet_release(struct socket *sock) 2616 { 2617 struct sock *sk = sock->sk; 2618 struct packet_sock *po; 2619 struct net *net; 2620 union tpacket_req_u req_u; 2621 2622 if (!sk) 2623 return 0; 2624 2625 net = sock_net(sk); 2626 po = pkt_sk(sk); 2627 2628 mutex_lock(&net->packet.sklist_lock); 2629 sk_del_node_init_rcu(sk); 2630 mutex_unlock(&net->packet.sklist_lock); 2631 2632 preempt_disable(); 2633 sock_prot_inuse_add(net, sk->sk_prot, -1); 2634 preempt_enable(); 2635 2636 spin_lock(&po->bind_lock); 2637 unregister_prot_hook(sk, false); 2638 packet_cached_dev_reset(po); 2639 2640 if (po->prot_hook.dev) { 2641 dev_put(po->prot_hook.dev); 2642 po->prot_hook.dev = NULL; 2643 } 2644 spin_unlock(&po->bind_lock); 2645 2646 packet_flush_mclist(sk); 2647 2648 if (po->rx_ring.pg_vec) { 2649 memset(&req_u, 0, sizeof(req_u)); 2650 packet_set_ring(sk, &req_u, 1, 0); 2651 } 2652 2653 if (po->tx_ring.pg_vec) { 2654 memset(&req_u, 0, sizeof(req_u)); 2655 packet_set_ring(sk, &req_u, 1, 1); 2656 } 2657 2658 fanout_release(sk); 2659 2660 synchronize_net(); 2661 /* 2662 * Now the socket is dead. No more input will appear. 2663 */ 2664 sock_orphan(sk); 2665 sock->sk = NULL; 2666 2667 /* Purge queues */ 2668 2669 skb_queue_purge(&sk->sk_receive_queue); 2670 packet_free_pending(po); 2671 sk_refcnt_debug_release(sk); 2672 2673 sock_put(sk); 2674 return 0; 2675 } 2676 2677 /* 2678 * Attach a packet hook. 
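 *	packet_do_bind() swaps the hook's protocol and device under
 *	po->bind_lock and only unregisters/re-registers the hook when one
 *	of them actually changes; binding with protocol 0 leaves the hook
 *	unregistered.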
2679 */ 2680 2681 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2682 { 2683 struct packet_sock *po = pkt_sk(sk); 2684 const struct net_device *dev_curr; 2685 __be16 proto_curr; 2686 bool need_rehook; 2687 2688 if (po->fanout) { 2689 if (dev) 2690 dev_put(dev); 2691 2692 return -EINVAL; 2693 } 2694 2695 lock_sock(sk); 2696 spin_lock(&po->bind_lock); 2697 2698 proto_curr = po->prot_hook.type; 2699 dev_curr = po->prot_hook.dev; 2700 2701 need_rehook = proto_curr != proto || dev_curr != dev; 2702 2703 if (need_rehook) { 2704 unregister_prot_hook(sk, true); 2705 2706 po->num = proto; 2707 po->prot_hook.type = proto; 2708 2709 if (po->prot_hook.dev) 2710 dev_put(po->prot_hook.dev); 2711 2712 po->prot_hook.dev = dev; 2713 2714 po->ifindex = dev ? dev->ifindex : 0; 2715 packet_cached_dev_assign(po, dev); 2716 } 2717 2718 if (proto == 0 || !need_rehook) 2719 goto out_unlock; 2720 2721 if (!dev || (dev->flags & IFF_UP)) { 2722 register_prot_hook(sk); 2723 } else { 2724 sk->sk_err = ENETDOWN; 2725 if (!sock_flag(sk, SOCK_DEAD)) 2726 sk->sk_error_report(sk); 2727 } 2728 2729 out_unlock: 2730 spin_unlock(&po->bind_lock); 2731 release_sock(sk); 2732 return 0; 2733 } 2734 2735 /* 2736 * Bind a packet socket to a device 2737 */ 2738 2739 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 2740 int addr_len) 2741 { 2742 struct sock *sk = sock->sk; 2743 char name[15]; 2744 struct net_device *dev; 2745 int err = -ENODEV; 2746 2747 /* 2748 * Check legality 2749 */ 2750 2751 if (addr_len != sizeof(struct sockaddr)) 2752 return -EINVAL; 2753 strlcpy(name, uaddr->sa_data, sizeof(name)); 2754 2755 dev = dev_get_by_name(sock_net(sk), name); 2756 if (dev) 2757 err = packet_do_bind(sk, dev, pkt_sk(sk)->num); 2758 return err; 2759 } 2760 2761 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 2762 { 2763 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 2764 struct sock *sk = sock->sk; 2765 struct net_device *dev = NULL; 2766 int err; 2767 2768 2769 /* 2770 * Check legality 2771 */ 2772 2773 if (addr_len < sizeof(struct sockaddr_ll)) 2774 return -EINVAL; 2775 if (sll->sll_family != AF_PACKET) 2776 return -EINVAL; 2777 2778 if (sll->sll_ifindex) { 2779 err = -ENODEV; 2780 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex); 2781 if (dev == NULL) 2782 goto out; 2783 } 2784 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); 2785 2786 out: 2787 return err; 2788 } 2789 2790 static struct proto packet_proto = { 2791 .name = "PACKET", 2792 .owner = THIS_MODULE, 2793 .obj_size = sizeof(struct packet_sock), 2794 }; 2795 2796 /* 2797 * Create a packet of type SOCK_PACKET. 
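 *	(Despite the historical name this handles SOCK_RAW and SOCK_DGRAM
 *	packet sockets as well; the caller must have CAP_NET_RAW.)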
2798 */ 2799 2800 static int packet_create(struct net *net, struct socket *sock, int protocol, 2801 int kern) 2802 { 2803 struct sock *sk; 2804 struct packet_sock *po; 2805 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 2806 int err; 2807 2808 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 2809 return -EPERM; 2810 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 2811 sock->type != SOCK_PACKET) 2812 return -ESOCKTNOSUPPORT; 2813 2814 sock->state = SS_UNCONNECTED; 2815 2816 err = -ENOBUFS; 2817 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto); 2818 if (sk == NULL) 2819 goto out; 2820 2821 sock->ops = &packet_ops; 2822 if (sock->type == SOCK_PACKET) 2823 sock->ops = &packet_ops_spkt; 2824 2825 sock_init_data(sock, sk); 2826 2827 po = pkt_sk(sk); 2828 sk->sk_family = PF_PACKET; 2829 po->num = proto; 2830 po->xmit = dev_queue_xmit; 2831 2832 err = packet_alloc_pending(po); 2833 if (err) 2834 goto out2; 2835 2836 packet_cached_dev_reset(po); 2837 2838 sk->sk_destruct = packet_sock_destruct; 2839 sk_refcnt_debug_inc(sk); 2840 2841 /* 2842 * Attach a protocol block 2843 */ 2844 2845 spin_lock_init(&po->bind_lock); 2846 mutex_init(&po->pg_vec_lock); 2847 po->prot_hook.func = packet_rcv; 2848 2849 if (sock->type == SOCK_PACKET) 2850 po->prot_hook.func = packet_rcv_spkt; 2851 2852 po->prot_hook.af_packet_priv = sk; 2853 2854 if (proto) { 2855 po->prot_hook.type = proto; 2856 register_prot_hook(sk); 2857 } 2858 2859 mutex_lock(&net->packet.sklist_lock); 2860 sk_add_node_rcu(sk, &net->packet.sklist); 2861 mutex_unlock(&net->packet.sklist_lock); 2862 2863 preempt_disable(); 2864 sock_prot_inuse_add(net, &packet_proto, 1); 2865 preempt_enable(); 2866 2867 return 0; 2868 out2: 2869 sk_free(sk); 2870 out: 2871 return err; 2872 } 2873 2874 /* 2875 * Pull a packet from our receive queue and hand it to the user. 2876 * If necessary we block. 2877 */ 2878 2879 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, 2880 struct msghdr *msg, size_t len, int flags) 2881 { 2882 struct sock *sk = sock->sk; 2883 struct sk_buff *skb; 2884 int copied, err; 2885 int vnet_hdr_len = 0; 2886 2887 err = -EINVAL; 2888 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 2889 goto out; 2890 2891 #if 0 2892 /* What error should we return now? EUNATTACH? */ 2893 if (pkt_sk(sk)->ifindex < 0) 2894 return -ENODEV; 2895 #endif 2896 2897 if (flags & MSG_ERRQUEUE) { 2898 err = sock_recv_errqueue(sk, msg, len, 2899 SOL_PACKET, PACKET_TX_TIMESTAMP); 2900 goto out; 2901 } 2902 2903 /* 2904 * Call the generic datagram receiver. This handles all sorts 2905 * of horrible races and re-entrancy so we can forget about it 2906 * in the protocol layers. 2907 * 2908 * Now it will return ENETDOWN, if device have just gone down, 2909 * but then it will block. 2910 */ 2911 2912 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 2913 2914 /* 2915 * An error occurred so return it. Because skb_recv_datagram() 2916 * handles the blocking we don't see and worry about blocking 2917 * retries. 2918 */ 2919 2920 if (skb == NULL) 2921 goto out; 2922 2923 if (pkt_sk(sk)->has_vnet_hdr) { 2924 struct virtio_net_hdr vnet_hdr = { 0 }; 2925 2926 err = -EINVAL; 2927 vnet_hdr_len = sizeof(vnet_hdr); 2928 if (len < vnet_hdr_len) 2929 goto out_free; 2930 2931 len -= vnet_hdr_len; 2932 2933 if (skb_is_gso(skb)) { 2934 struct skb_shared_info *sinfo = skb_shinfo(skb); 2935 2936 /* This is a hint as to how much should be linear. 
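			 * The gso_size/gso_type fields below simply mirror
			 * the skb's offload state so userspace can re-apply
			 * it.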
*/ 2937 vnet_hdr.hdr_len = 2938 __cpu_to_virtio16(false, skb_headlen(skb)); 2939 vnet_hdr.gso_size = 2940 __cpu_to_virtio16(false, sinfo->gso_size); 2941 if (sinfo->gso_type & SKB_GSO_TCPV4) 2942 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 2943 else if (sinfo->gso_type & SKB_GSO_TCPV6) 2944 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 2945 else if (sinfo->gso_type & SKB_GSO_UDP) 2946 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; 2947 else if (sinfo->gso_type & SKB_GSO_FCOE) 2948 goto out_free; 2949 else 2950 BUG(); 2951 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 2952 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; 2953 } else 2954 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; 2955 2956 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2957 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 2958 vnet_hdr.csum_start = __cpu_to_virtio16(false, 2959 skb_checksum_start_offset(skb)); 2960 vnet_hdr.csum_offset = __cpu_to_virtio16(false, 2961 skb->csum_offset); 2962 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 2963 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; 2964 } /* else everything is zero */ 2965 2966 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len); 2967 if (err < 0) 2968 goto out_free; 2969 } 2970 2971 /* You lose any data beyond the buffer you gave. If it worries 2972 * a user program they can ask the device for its MTU 2973 * anyway. 2974 */ 2975 copied = skb->len; 2976 if (copied > len) { 2977 copied = len; 2978 msg->msg_flags |= MSG_TRUNC; 2979 } 2980 2981 err = skb_copy_datagram_msg(skb, 0, msg, copied); 2982 if (err) 2983 goto out_free; 2984 2985 sock_recv_ts_and_drops(msg, sk, skb); 2986 2987 if (msg->msg_name) { 2988 /* If the address length field is there to be filled 2989 * in, we fill it in now. 2990 */ 2991 if (sock->type == SOCK_PACKET) { 2992 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 2993 msg->msg_namelen = sizeof(struct sockaddr_pkt); 2994 } else { 2995 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 2996 msg->msg_namelen = sll->sll_halen + 2997 offsetof(struct sockaddr_ll, sll_addr); 2998 } 2999 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 3000 msg->msg_namelen); 3001 } 3002 3003 if (pkt_sk(sk)->auxdata) { 3004 struct tpacket_auxdata aux; 3005 3006 aux.tp_status = TP_STATUS_USER; 3007 if (skb->ip_summed == CHECKSUM_PARTIAL) 3008 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3009 aux.tp_len = PACKET_SKB_CB(skb)->origlen; 3010 aux.tp_snaplen = skb->len; 3011 aux.tp_mac = 0; 3012 aux.tp_net = skb_network_offset(skb); 3013 if (vlan_tx_tag_present(skb)) { 3014 aux.tp_vlan_tci = vlan_tx_tag_get(skb); 3015 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3016 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3017 } else { 3018 aux.tp_vlan_tci = 0; 3019 aux.tp_vlan_tpid = 0; 3020 } 3021 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3022 } 3023 3024 /* 3025 * Free or return the buffer as appropriate. Again this 3026 * hides all the races and re-entrancy issues from us. 3027 */ 3028 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3029 3030 out_free: 3031 skb_free_datagram(sk, skb); 3032 out: 3033 return err; 3034 } 3035 3036 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3037 int *uaddr_len, int peer) 3038 { 3039 struct net_device *dev; 3040 struct sock *sk = sock->sk; 3041 3042 if (peer) 3043 return -EOPNOTSUPP; 3044 3045 uaddr->sa_family = AF_PACKET; 3046 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3047 rcu_read_lock(); 3048 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3049 if (dev) 3050 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3051 rcu_read_unlock(); 3052 *uaddr_len = sizeof(*uaddr); 3053 3054 return 0; 3055 } 3056 3057 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3058 int *uaddr_len, int peer) 3059 { 3060 struct net_device *dev; 3061 struct sock *sk = sock->sk; 3062 struct packet_sock *po = pkt_sk(sk); 3063 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3064 3065 if (peer) 3066 return -EOPNOTSUPP; 3067 3068 sll->sll_family = AF_PACKET; 3069 sll->sll_ifindex = po->ifindex; 3070 sll->sll_protocol = po->num; 3071 sll->sll_pkttype = 0; 3072 rcu_read_lock(); 3073 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3074 if (dev) { 3075 sll->sll_hatype = dev->type; 3076 sll->sll_halen = dev->addr_len; 3077 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3078 } else { 3079 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3080 sll->sll_halen = 0; 3081 } 3082 rcu_read_unlock(); 3083 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3084 3085 return 0; 3086 } 3087 3088 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3089 int what) 3090 { 3091 switch (i->type) { 3092 case PACKET_MR_MULTICAST: 3093 if (i->alen != dev->addr_len) 3094 return -EINVAL; 3095 if (what > 0) 3096 return dev_mc_add(dev, i->addr); 3097 else 3098 return dev_mc_del(dev, i->addr); 3099 break; 3100 case PACKET_MR_PROMISC: 3101 return dev_set_promiscuity(dev, what); 3102 case PACKET_MR_ALLMULTI: 3103 return dev_set_allmulti(dev, what); 3104 case PACKET_MR_UNICAST: 3105 if (i->alen != dev->addr_len) 3106 return -EINVAL; 3107 if (what > 0) 3108 return dev_uc_add(dev, i->addr); 3109 else 3110 return dev_uc_del(dev, i->addr); 3111 break; 3112 default: 3113 break; 3114 } 3115 return 0; 3116 } 3117 3118 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) 3119 { 3120 for ( ; i; i = i->next) { 3121 if (i->ifindex == dev->ifindex) 3122 packet_dev_mc(dev, i, what); 3123 } 3124 } 3125 3126 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3127 { 3128 struct packet_sock *po = pkt_sk(sk); 3129 struct packet_mclist *ml, *i; 3130 struct net_device *dev; 3131 int err; 3132 3133 rtnl_lock(); 3134 3135 err = -ENODEV; 3136 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3137 if (!dev) 3138 goto done; 3139 3140 err = -EINVAL; 3141 if (mreq->mr_alen > dev->addr_len) 3142 goto done; 3143 3144 err = -ENOBUFS; 3145 i = kmalloc(sizeof(*i), GFP_KERNEL); 3146 if (i == NULL) 3147 goto done; 3148 3149 err = 0; 3150 for (ml = po->mclist; ml; ml = ml->next) { 3151 if (ml->ifindex == mreq->mr_ifindex && 3152 ml->type == mreq->mr_type && 3153 ml->alen == mreq->mr_alen && 3154 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3155 ml->count++; 3156 /* Free the new element ... 
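			 * an identical membership entry already exists and
			 * its refcount was bumped above, so the new
			 * allocation is redundant.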
*/ 3157 kfree(i); 3158 goto done; 3159 } 3160 } 3161 3162 i->type = mreq->mr_type; 3163 i->ifindex = mreq->mr_ifindex; 3164 i->alen = mreq->mr_alen; 3165 memcpy(i->addr, mreq->mr_address, i->alen); 3166 i->count = 1; 3167 i->next = po->mclist; 3168 po->mclist = i; 3169 err = packet_dev_mc(dev, i, 1); 3170 if (err) { 3171 po->mclist = i->next; 3172 kfree(i); 3173 } 3174 3175 done: 3176 rtnl_unlock(); 3177 return err; 3178 } 3179 3180 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3181 { 3182 struct packet_mclist *ml, **mlp; 3183 3184 rtnl_lock(); 3185 3186 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3187 if (ml->ifindex == mreq->mr_ifindex && 3188 ml->type == mreq->mr_type && 3189 ml->alen == mreq->mr_alen && 3190 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3191 if (--ml->count == 0) { 3192 struct net_device *dev; 3193 *mlp = ml->next; 3194 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3195 if (dev) 3196 packet_dev_mc(dev, ml, -1); 3197 kfree(ml); 3198 } 3199 rtnl_unlock(); 3200 return 0; 3201 } 3202 } 3203 rtnl_unlock(); 3204 return -EADDRNOTAVAIL; 3205 } 3206 3207 static void packet_flush_mclist(struct sock *sk) 3208 { 3209 struct packet_sock *po = pkt_sk(sk); 3210 struct packet_mclist *ml; 3211 3212 if (!po->mclist) 3213 return; 3214 3215 rtnl_lock(); 3216 while ((ml = po->mclist) != NULL) { 3217 struct net_device *dev; 3218 3219 po->mclist = ml->next; 3220 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3221 if (dev != NULL) 3222 packet_dev_mc(dev, ml, -1); 3223 kfree(ml); 3224 } 3225 rtnl_unlock(); 3226 } 3227 3228 static int 3229 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 3230 { 3231 struct sock *sk = sock->sk; 3232 struct packet_sock *po = pkt_sk(sk); 3233 int ret; 3234 3235 if (level != SOL_PACKET) 3236 return -ENOPROTOOPT; 3237 3238 switch (optname) { 3239 case PACKET_ADD_MEMBERSHIP: 3240 case PACKET_DROP_MEMBERSHIP: 3241 { 3242 struct packet_mreq_max mreq; 3243 int len = optlen; 3244 memset(&mreq, 0, sizeof(mreq)); 3245 if (len < sizeof(struct packet_mreq)) 3246 return -EINVAL; 3247 if (len > sizeof(mreq)) 3248 len = sizeof(mreq); 3249 if (copy_from_user(&mreq, optval, len)) 3250 return -EFAULT; 3251 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3252 return -EINVAL; 3253 if (optname == PACKET_ADD_MEMBERSHIP) 3254 ret = packet_mc_add(sk, &mreq); 3255 else 3256 ret = packet_mc_drop(sk, &mreq); 3257 return ret; 3258 } 3259 3260 case PACKET_RX_RING: 3261 case PACKET_TX_RING: 3262 { 3263 union tpacket_req_u req_u; 3264 int len; 3265 3266 switch (po->tp_version) { 3267 case TPACKET_V1: 3268 case TPACKET_V2: 3269 len = sizeof(req_u.req); 3270 break; 3271 case TPACKET_V3: 3272 default: 3273 len = sizeof(req_u.req3); 3274 break; 3275 } 3276 if (optlen < len) 3277 return -EINVAL; 3278 if (pkt_sk(sk)->has_vnet_hdr) 3279 return -EINVAL; 3280 if (copy_from_user(&req_u.req, optval, len)) 3281 return -EFAULT; 3282 return packet_set_ring(sk, &req_u, 0, 3283 optname == PACKET_TX_RING); 3284 } 3285 case PACKET_COPY_THRESH: 3286 { 3287 int val; 3288 3289 if (optlen != sizeof(val)) 3290 return -EINVAL; 3291 if (copy_from_user(&val, optval, sizeof(val))) 3292 return -EFAULT; 3293 3294 pkt_sk(sk)->copy_thresh = val; 3295 return 0; 3296 } 3297 case PACKET_VERSION: 3298 { 3299 int val; 3300 3301 if (optlen != sizeof(val)) 3302 return -EINVAL; 3303 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3304 return -EBUSY; 3305 if (copy_from_user(&val, 
optval, sizeof(val))) 3306 return -EFAULT; 3307 switch (val) { 3308 case TPACKET_V1: 3309 case TPACKET_V2: 3310 case TPACKET_V3: 3311 po->tp_version = val; 3312 return 0; 3313 default: 3314 return -EINVAL; 3315 } 3316 } 3317 case PACKET_RESERVE: 3318 { 3319 unsigned int val; 3320 3321 if (optlen != sizeof(val)) 3322 return -EINVAL; 3323 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3324 return -EBUSY; 3325 if (copy_from_user(&val, optval, sizeof(val))) 3326 return -EFAULT; 3327 po->tp_reserve = val; 3328 return 0; 3329 } 3330 case PACKET_LOSS: 3331 { 3332 unsigned int val; 3333 3334 if (optlen != sizeof(val)) 3335 return -EINVAL; 3336 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3337 return -EBUSY; 3338 if (copy_from_user(&val, optval, sizeof(val))) 3339 return -EFAULT; 3340 po->tp_loss = !!val; 3341 return 0; 3342 } 3343 case PACKET_AUXDATA: 3344 { 3345 int val; 3346 3347 if (optlen < sizeof(val)) 3348 return -EINVAL; 3349 if (copy_from_user(&val, optval, sizeof(val))) 3350 return -EFAULT; 3351 3352 po->auxdata = !!val; 3353 return 0; 3354 } 3355 case PACKET_ORIGDEV: 3356 { 3357 int val; 3358 3359 if (optlen < sizeof(val)) 3360 return -EINVAL; 3361 if (copy_from_user(&val, optval, sizeof(val))) 3362 return -EFAULT; 3363 3364 po->origdev = !!val; 3365 return 0; 3366 } 3367 case PACKET_VNET_HDR: 3368 { 3369 int val; 3370 3371 if (sock->type != SOCK_RAW) 3372 return -EINVAL; 3373 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3374 return -EBUSY; 3375 if (optlen < sizeof(val)) 3376 return -EINVAL; 3377 if (copy_from_user(&val, optval, sizeof(val))) 3378 return -EFAULT; 3379 3380 po->has_vnet_hdr = !!val; 3381 return 0; 3382 } 3383 case PACKET_TIMESTAMP: 3384 { 3385 int val; 3386 3387 if (optlen != sizeof(val)) 3388 return -EINVAL; 3389 if (copy_from_user(&val, optval, sizeof(val))) 3390 return -EFAULT; 3391 3392 po->tp_tstamp = val; 3393 return 0; 3394 } 3395 case PACKET_FANOUT: 3396 { 3397 int val; 3398 3399 if (optlen != sizeof(val)) 3400 return -EINVAL; 3401 if (copy_from_user(&val, optval, sizeof(val))) 3402 return -EFAULT; 3403 3404 return fanout_add(sk, val & 0xffff, val >> 16); 3405 } 3406 case PACKET_TX_HAS_OFF: 3407 { 3408 unsigned int val; 3409 3410 if (optlen != sizeof(val)) 3411 return -EINVAL; 3412 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3413 return -EBUSY; 3414 if (copy_from_user(&val, optval, sizeof(val))) 3415 return -EFAULT; 3416 po->tp_tx_has_off = !!val; 3417 return 0; 3418 } 3419 case PACKET_QDISC_BYPASS: 3420 { 3421 int val; 3422 3423 if (optlen != sizeof(val)) 3424 return -EINVAL; 3425 if (copy_from_user(&val, optval, sizeof(val))) 3426 return -EFAULT; 3427 3428 po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; 3429 return 0; 3430 } 3431 default: 3432 return -ENOPROTOOPT; 3433 } 3434 } 3435 3436 static int packet_getsockopt(struct socket *sock, int level, int optname, 3437 char __user *optval, int __user *optlen) 3438 { 3439 int len; 3440 int val, lv = sizeof(val); 3441 struct sock *sk = sock->sk; 3442 struct packet_sock *po = pkt_sk(sk); 3443 void *data = &val; 3444 union tpacket_stats_u st; 3445 3446 if (level != SOL_PACKET) 3447 return -ENOPROTOOPT; 3448 3449 if (get_user(len, optlen)) 3450 return -EFAULT; 3451 3452 if (len < 0) 3453 return -EINVAL; 3454 3455 switch (optname) { 3456 case PACKET_STATISTICS: 3457 spin_lock_bh(&sk->sk_receive_queue.lock); 3458 memcpy(&st, &po->stats, sizeof(st)); 3459 memset(&po->stats, 0, sizeof(po->stats)); 3460 spin_unlock_bh(&sk->sk_receive_queue.lock); 3461 3462 if (po->tp_version == TPACKET_V3) { 3463 lv = sizeof(struct tpacket_stats_v3); 3464 st.stats3.tp_packets += st.stats3.tp_drops; 3465 data = &st.stats3; 3466 } else { 3467 lv = sizeof(struct tpacket_stats); 3468 st.stats1.tp_packets += st.stats1.tp_drops; 3469 data = &st.stats1; 3470 } 3471 3472 break; 3473 case PACKET_AUXDATA: 3474 val = po->auxdata; 3475 break; 3476 case PACKET_ORIGDEV: 3477 val = po->origdev; 3478 break; 3479 case PACKET_VNET_HDR: 3480 val = po->has_vnet_hdr; 3481 break; 3482 case PACKET_VERSION: 3483 val = po->tp_version; 3484 break; 3485 case PACKET_HDRLEN: 3486 if (len > sizeof(int)) 3487 len = sizeof(int); 3488 if (copy_from_user(&val, optval, len)) 3489 return -EFAULT; 3490 switch (val) { 3491 case TPACKET_V1: 3492 val = sizeof(struct tpacket_hdr); 3493 break; 3494 case TPACKET_V2: 3495 val = sizeof(struct tpacket2_hdr); 3496 break; 3497 case TPACKET_V3: 3498 val = sizeof(struct tpacket3_hdr); 3499 break; 3500 default: 3501 return -EINVAL; 3502 } 3503 break; 3504 case PACKET_RESERVE: 3505 val = po->tp_reserve; 3506 break; 3507 case PACKET_LOSS: 3508 val = po->tp_loss; 3509 break; 3510 case PACKET_TIMESTAMP: 3511 val = po->tp_tstamp; 3512 break; 3513 case PACKET_FANOUT: 3514 val = (po->fanout ? 
3515 ((u32)po->fanout->id | 3516 ((u32)po->fanout->type << 16) | 3517 ((u32)po->fanout->flags << 24)) : 3518 0); 3519 break; 3520 case PACKET_TX_HAS_OFF: 3521 val = po->tp_tx_has_off; 3522 break; 3523 case PACKET_QDISC_BYPASS: 3524 val = packet_use_direct_xmit(po); 3525 break; 3526 default: 3527 return -ENOPROTOOPT; 3528 } 3529 3530 if (len > lv) 3531 len = lv; 3532 if (put_user(len, optlen)) 3533 return -EFAULT; 3534 if (copy_to_user(optval, data, len)) 3535 return -EFAULT; 3536 return 0; 3537 } 3538 3539 3540 static int packet_notifier(struct notifier_block *this, 3541 unsigned long msg, void *ptr) 3542 { 3543 struct sock *sk; 3544 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3545 struct net *net = dev_net(dev); 3546 3547 rcu_read_lock(); 3548 sk_for_each_rcu(sk, &net->packet.sklist) { 3549 struct packet_sock *po = pkt_sk(sk); 3550 3551 switch (msg) { 3552 case NETDEV_UNREGISTER: 3553 if (po->mclist) 3554 packet_dev_mclist(dev, po->mclist, -1); 3555 /* fallthrough */ 3556 3557 case NETDEV_DOWN: 3558 if (dev->ifindex == po->ifindex) { 3559 spin_lock(&po->bind_lock); 3560 if (po->running) { 3561 __unregister_prot_hook(sk, false); 3562 sk->sk_err = ENETDOWN; 3563 if (!sock_flag(sk, SOCK_DEAD)) 3564 sk->sk_error_report(sk); 3565 } 3566 if (msg == NETDEV_UNREGISTER) { 3567 packet_cached_dev_reset(po); 3568 po->ifindex = -1; 3569 if (po->prot_hook.dev) 3570 dev_put(po->prot_hook.dev); 3571 po->prot_hook.dev = NULL; 3572 } 3573 spin_unlock(&po->bind_lock); 3574 } 3575 break; 3576 case NETDEV_UP: 3577 if (dev->ifindex == po->ifindex) { 3578 spin_lock(&po->bind_lock); 3579 if (po->num) 3580 register_prot_hook(sk); 3581 spin_unlock(&po->bind_lock); 3582 } 3583 break; 3584 } 3585 } 3586 rcu_read_unlock(); 3587 return NOTIFY_DONE; 3588 } 3589 3590 3591 static int packet_ioctl(struct socket *sock, unsigned int cmd, 3592 unsigned long arg) 3593 { 3594 struct sock *sk = sock->sk; 3595 3596 switch (cmd) { 3597 case SIOCOUTQ: 3598 { 3599 int amount = sk_wmem_alloc_get(sk); 3600 3601 return put_user(amount, (int __user *)arg); 3602 } 3603 case SIOCINQ: 3604 { 3605 struct sk_buff *skb; 3606 int amount = 0; 3607 3608 spin_lock_bh(&sk->sk_receive_queue.lock); 3609 skb = skb_peek(&sk->sk_receive_queue); 3610 if (skb) 3611 amount = skb->len; 3612 spin_unlock_bh(&sk->sk_receive_queue.lock); 3613 return put_user(amount, (int __user *)arg); 3614 } 3615 case SIOCGSTAMP: 3616 return sock_get_timestamp(sk, (struct timeval __user *)arg); 3617 case SIOCGSTAMPNS: 3618 return sock_get_timestampns(sk, (struct timespec __user *)arg); 3619 3620 #ifdef CONFIG_INET 3621 case SIOCADDRT: 3622 case SIOCDELRT: 3623 case SIOCDARP: 3624 case SIOCGARP: 3625 case SIOCSARP: 3626 case SIOCGIFADDR: 3627 case SIOCSIFADDR: 3628 case SIOCGIFBRDADDR: 3629 case SIOCSIFBRDADDR: 3630 case SIOCGIFNETMASK: 3631 case SIOCSIFNETMASK: 3632 case SIOCGIFDSTADDR: 3633 case SIOCSIFDSTADDR: 3634 case SIOCSIFFLAGS: 3635 return inet_dgram_ops.ioctl(sock, cmd, arg); 3636 #endif 3637 3638 default: 3639 return -ENOIOCTLCMD; 3640 } 3641 return 0; 3642 } 3643 3644 static unsigned int packet_poll(struct file *file, struct socket *sock, 3645 poll_table *wait) 3646 { 3647 struct sock *sk = sock->sk; 3648 struct packet_sock *po = pkt_sk(sk); 3649 unsigned int mask = datagram_poll(file, sock, wait); 3650 3651 spin_lock_bh(&sk->sk_receive_queue.lock); 3652 if (po->rx_ring.pg_vec) { 3653 if (!packet_previous_rx_frame(po, &po->rx_ring, 3654 TP_STATUS_KERNEL)) 3655 mask |= POLLIN | POLLRDNORM; 3656 } 3657 spin_unlock_bh(&sk->sk_receive_queue.lock); 3658 
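	/* Likewise report writability while the TX ring still has a frame
	 * in the TP_STATUS_AVAILABLE state.
	 */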
spin_lock_bh(&sk->sk_write_queue.lock); 3659 if (po->tx_ring.pg_vec) { 3660 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 3661 mask |= POLLOUT | POLLWRNORM; 3662 } 3663 spin_unlock_bh(&sk->sk_write_queue.lock); 3664 return mask; 3665 } 3666 3667 3668 /* Dirty? Well, I still did not learn better way to account 3669 * for user mmaps. 3670 */ 3671 3672 static void packet_mm_open(struct vm_area_struct *vma) 3673 { 3674 struct file *file = vma->vm_file; 3675 struct socket *sock = file->private_data; 3676 struct sock *sk = sock->sk; 3677 3678 if (sk) 3679 atomic_inc(&pkt_sk(sk)->mapped); 3680 } 3681 3682 static void packet_mm_close(struct vm_area_struct *vma) 3683 { 3684 struct file *file = vma->vm_file; 3685 struct socket *sock = file->private_data; 3686 struct sock *sk = sock->sk; 3687 3688 if (sk) 3689 atomic_dec(&pkt_sk(sk)->mapped); 3690 } 3691 3692 static const struct vm_operations_struct packet_mmap_ops = { 3693 .open = packet_mm_open, 3694 .close = packet_mm_close, 3695 }; 3696 3697 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 3698 unsigned int len) 3699 { 3700 int i; 3701 3702 for (i = 0; i < len; i++) { 3703 if (likely(pg_vec[i].buffer)) { 3704 if (is_vmalloc_addr(pg_vec[i].buffer)) 3705 vfree(pg_vec[i].buffer); 3706 else 3707 free_pages((unsigned long)pg_vec[i].buffer, 3708 order); 3709 pg_vec[i].buffer = NULL; 3710 } 3711 } 3712 kfree(pg_vec); 3713 } 3714 3715 static char *alloc_one_pg_vec_page(unsigned long order) 3716 { 3717 char *buffer; 3718 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 3719 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 3720 3721 buffer = (char *) __get_free_pages(gfp_flags, order); 3722 if (buffer) 3723 return buffer; 3724 3725 /* __get_free_pages failed, fall back to vmalloc */ 3726 buffer = vzalloc((1 << order) * PAGE_SIZE); 3727 if (buffer) 3728 return buffer; 3729 3730 /* vmalloc failed, lets dig into swap here */ 3731 gfp_flags &= ~__GFP_NORETRY; 3732 buffer = (char *) __get_free_pages(gfp_flags, order); 3733 if (buffer) 3734 return buffer; 3735 3736 /* complete and utter failure */ 3737 return NULL; 3738 } 3739 3740 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 3741 { 3742 unsigned int block_nr = req->tp_block_nr; 3743 struct pgv *pg_vec; 3744 int i; 3745 3746 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); 3747 if (unlikely(!pg_vec)) 3748 goto out; 3749 3750 for (i = 0; i < block_nr; i++) { 3751 pg_vec[i].buffer = alloc_one_pg_vec_page(order); 3752 if (unlikely(!pg_vec[i].buffer)) 3753 goto out_free_pgvec; 3754 } 3755 3756 out: 3757 return pg_vec; 3758 3759 out_free_pgvec: 3760 free_pg_vec(pg_vec, order, block_nr); 3761 pg_vec = NULL; 3762 goto out; 3763 } 3764 3765 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 3766 int closing, int tx_ring) 3767 { 3768 struct pgv *pg_vec = NULL; 3769 struct packet_sock *po = pkt_sk(sk); 3770 int was_running, order = 0; 3771 struct packet_ring_buffer *rb; 3772 struct sk_buff_head *rb_queue; 3773 __be16 num; 3774 int err = -EINVAL; 3775 /* Added to avoid minimal code churn */ 3776 struct tpacket_req *req = &req_u->req; 3777 3778 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 3779 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 3780 WARN(1, "Tx-ring is not supported.\n"); 3781 goto out; 3782 } 3783 3784 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 3785 rb_queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; 3786 3787 err = -EBUSY; 3788 if (!closing) { 3789 if (atomic_read(&po->mapped)) 3790 goto out; 3791 if (packet_read_pending(rb)) 3792 goto out; 3793 } 3794 3795 if (req->tp_block_nr) { 3796 /* Sanity tests and some calculations */ 3797 err = -EBUSY; 3798 if (unlikely(rb->pg_vec)) 3799 goto out; 3800 3801 switch (po->tp_version) { 3802 case TPACKET_V1: 3803 po->tp_hdrlen = TPACKET_HDRLEN; 3804 break; 3805 case TPACKET_V2: 3806 po->tp_hdrlen = TPACKET2_HDRLEN; 3807 break; 3808 case TPACKET_V3: 3809 po->tp_hdrlen = TPACKET3_HDRLEN; 3810 break; 3811 } 3812 3813 err = -EINVAL; 3814 if (unlikely((int)req->tp_block_size <= 0)) 3815 goto out; 3816 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 3817 goto out; 3818 if (po->tp_version >= TPACKET_V3 && 3819 (int)(req->tp_block_size - 3820 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 3821 goto out; 3822 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 3823 po->tp_reserve)) 3824 goto out; 3825 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 3826 goto out; 3827 3828 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; 3829 if (unlikely(rb->frames_per_block <= 0)) 3830 goto out; 3831 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 3832 req->tp_frame_nr)) 3833 goto out; 3834 3835 err = -ENOMEM; 3836 order = get_order(req->tp_block_size); 3837 pg_vec = alloc_pg_vec(req, order); 3838 if (unlikely(!pg_vec)) 3839 goto out; 3840 switch (po->tp_version) { 3841 case TPACKET_V3: 3842 /* Transmit path is not supported. We checked 3843 * it above but just being paranoid 3844 */ 3845 if (!tx_ring) 3846 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); 3847 break; 3848 default: 3849 break; 3850 } 3851 } 3852 /* Done */ 3853 else { 3854 err = -EINVAL; 3855 if (unlikely(req->tp_frame_nr)) 3856 goto out; 3857 } 3858 3859 lock_sock(sk); 3860 3861 /* Detach socket from network */ 3862 spin_lock(&po->bind_lock); 3863 was_running = po->running; 3864 num = po->num; 3865 if (was_running) { 3866 po->num = 0; 3867 __unregister_prot_hook(sk, false); 3868 } 3869 spin_unlock(&po->bind_lock); 3870 3871 synchronize_net(); 3872 3873 err = -EBUSY; 3874 mutex_lock(&po->pg_vec_lock); 3875 if (closing || atomic_read(&po->mapped) == 0) { 3876 err = 0; 3877 spin_lock_bh(&rb_queue->lock); 3878 swap(rb->pg_vec, pg_vec); 3879 rb->frame_max = (req->tp_frame_nr - 1); 3880 rb->head = 0; 3881 rb->frame_size = req->tp_frame_size; 3882 spin_unlock_bh(&rb_queue->lock); 3883 3884 swap(rb->pg_vec_order, order); 3885 swap(rb->pg_vec_len, req->tp_block_nr); 3886 3887 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 3888 po->prot_hook.func = (po->rx_ring.pg_vec) ? 
3889 tpacket_rcv : packet_rcv; 3890 skb_queue_purge(rb_queue); 3891 if (atomic_read(&po->mapped)) 3892 pr_err("packet_mmap: vma is busy: %d\n", 3893 atomic_read(&po->mapped)); 3894 } 3895 mutex_unlock(&po->pg_vec_lock); 3896 3897 spin_lock(&po->bind_lock); 3898 if (was_running) { 3899 po->num = num; 3900 register_prot_hook(sk); 3901 } 3902 spin_unlock(&po->bind_lock); 3903 if (closing && (po->tp_version > TPACKET_V2)) { 3904 /* Because we don't support block-based V3 on tx-ring */ 3905 if (!tx_ring) 3906 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); 3907 } 3908 release_sock(sk); 3909 3910 if (pg_vec) 3911 free_pg_vec(pg_vec, order, req->tp_block_nr); 3912 out: 3913 return err; 3914 } 3915 3916 static int packet_mmap(struct file *file, struct socket *sock, 3917 struct vm_area_struct *vma) 3918 { 3919 struct sock *sk = sock->sk; 3920 struct packet_sock *po = pkt_sk(sk); 3921 unsigned long size, expected_size; 3922 struct packet_ring_buffer *rb; 3923 unsigned long start; 3924 int err = -EINVAL; 3925 int i; 3926 3927 if (vma->vm_pgoff) 3928 return -EINVAL; 3929 3930 mutex_lock(&po->pg_vec_lock); 3931 3932 expected_size = 0; 3933 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 3934 if (rb->pg_vec) { 3935 expected_size += rb->pg_vec_len 3936 * rb->pg_vec_pages 3937 * PAGE_SIZE; 3938 } 3939 } 3940 3941 if (expected_size == 0) 3942 goto out; 3943 3944 size = vma->vm_end - vma->vm_start; 3945 if (size != expected_size) 3946 goto out; 3947 3948 start = vma->vm_start; 3949 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { 3950 if (rb->pg_vec == NULL) 3951 continue; 3952 3953 for (i = 0; i < rb->pg_vec_len; i++) { 3954 struct page *page; 3955 void *kaddr = rb->pg_vec[i].buffer; 3956 int pg_num; 3957 3958 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { 3959 page = pgv_to_page(kaddr); 3960 err = vm_insert_page(vma, start, page); 3961 if (unlikely(err)) 3962 goto out; 3963 start += PAGE_SIZE; 3964 kaddr += PAGE_SIZE; 3965 } 3966 } 3967 } 3968 3969 atomic_inc(&po->mapped); 3970 vma->vm_ops = &packet_mmap_ops; 3971 err = 0; 3972 3973 out: 3974 mutex_unlock(&po->pg_vec_lock); 3975 return err; 3976 } 3977 3978 static const struct proto_ops packet_ops_spkt = { 3979 .family = PF_PACKET, 3980 .owner = THIS_MODULE, 3981 .release = packet_release, 3982 .bind = packet_bind_spkt, 3983 .connect = sock_no_connect, 3984 .socketpair = sock_no_socketpair, 3985 .accept = sock_no_accept, 3986 .getname = packet_getname_spkt, 3987 .poll = datagram_poll, 3988 .ioctl = packet_ioctl, 3989 .listen = sock_no_listen, 3990 .shutdown = sock_no_shutdown, 3991 .setsockopt = sock_no_setsockopt, 3992 .getsockopt = sock_no_getsockopt, 3993 .sendmsg = packet_sendmsg_spkt, 3994 .recvmsg = packet_recvmsg, 3995 .mmap = sock_no_mmap, 3996 .sendpage = sock_no_sendpage, 3997 }; 3998 3999 static const struct proto_ops packet_ops = { 4000 .family = PF_PACKET, 4001 .owner = THIS_MODULE, 4002 .release = packet_release, 4003 .bind = packet_bind, 4004 .connect = sock_no_connect, 4005 .socketpair = sock_no_socketpair, 4006 .accept = sock_no_accept, 4007 .getname = packet_getname, 4008 .poll = packet_poll, 4009 .ioctl = packet_ioctl, 4010 .listen = sock_no_listen, 4011 .shutdown = sock_no_shutdown, 4012 .setsockopt = packet_setsockopt, 4013 .getsockopt = packet_getsockopt, 4014 .sendmsg = packet_sendmsg, 4015 .recvmsg = packet_recvmsg, 4016 .mmap = packet_mmap, 4017 .sendpage = sock_no_sendpage, 4018 }; 4019 4020 static const struct net_proto_family packet_family_ops = { 4021 .family = PF_PACKET, 4022 .create = packet_create, 
4023 .owner = THIS_MODULE, 4024 }; 4025 4026 static struct notifier_block packet_netdev_notifier = { 4027 .notifier_call = packet_notifier, 4028 }; 4029 4030 #ifdef CONFIG_PROC_FS 4031 4032 static void *packet_seq_start(struct seq_file *seq, loff_t *pos) 4033 __acquires(RCU) 4034 { 4035 struct net *net = seq_file_net(seq); 4036 4037 rcu_read_lock(); 4038 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); 4039 } 4040 4041 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4042 { 4043 struct net *net = seq_file_net(seq); 4044 return seq_hlist_next_rcu(v, &net->packet.sklist, pos); 4045 } 4046 4047 static void packet_seq_stop(struct seq_file *seq, void *v) 4048 __releases(RCU) 4049 { 4050 rcu_read_unlock(); 4051 } 4052 4053 static int packet_seq_show(struct seq_file *seq, void *v) 4054 { 4055 if (v == SEQ_START_TOKEN) 4056 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 4057 else { 4058 struct sock *s = sk_entry(v); 4059 const struct packet_sock *po = pkt_sk(s); 4060 4061 seq_printf(seq, 4062 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", 4063 s, 4064 atomic_read(&s->sk_refcnt), 4065 s->sk_type, 4066 ntohs(po->num), 4067 po->ifindex, 4068 po->running, 4069 atomic_read(&s->sk_rmem_alloc), 4070 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), 4071 sock_i_ino(s)); 4072 } 4073 4074 return 0; 4075 } 4076 4077 static const struct seq_operations packet_seq_ops = { 4078 .start = packet_seq_start, 4079 .next = packet_seq_next, 4080 .stop = packet_seq_stop, 4081 .show = packet_seq_show, 4082 }; 4083 4084 static int packet_seq_open(struct inode *inode, struct file *file) 4085 { 4086 return seq_open_net(inode, file, &packet_seq_ops, 4087 sizeof(struct seq_net_private)); 4088 } 4089 4090 static const struct file_operations packet_seq_fops = { 4091 .owner = THIS_MODULE, 4092 .open = packet_seq_open, 4093 .read = seq_read, 4094 .llseek = seq_lseek, 4095 .release = seq_release_net, 4096 }; 4097 4098 #endif 4099 4100 static int __net_init packet_net_init(struct net *net) 4101 { 4102 mutex_init(&net->packet.sklist_lock); 4103 INIT_HLIST_HEAD(&net->packet.sklist); 4104 4105 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) 4106 return -ENOMEM; 4107 4108 return 0; 4109 } 4110 4111 static void __net_exit packet_net_exit(struct net *net) 4112 { 4113 remove_proc_entry("packet", net->proc_net); 4114 } 4115 4116 static struct pernet_operations packet_net_ops = { 4117 .init = packet_net_init, 4118 .exit = packet_net_exit, 4119 }; 4120 4121 4122 static void __exit packet_exit(void) 4123 { 4124 unregister_netdevice_notifier(&packet_netdev_notifier); 4125 unregister_pernet_subsys(&packet_net_ops); 4126 sock_unregister(PF_PACKET); 4127 proto_unregister(&packet_proto); 4128 } 4129 4130 static int __init packet_init(void) 4131 { 4132 int rc = proto_register(&packet_proto, 0); 4133 4134 if (rc != 0) 4135 goto out; 4136 4137 sock_register(&packet_family_ops); 4138 register_pernet_subsys(&packet_net_ops); 4139 register_netdevice_notifier(&packet_netdev_notifier); 4140 out: 4141 return rc; 4142 } 4143 4144 module_init(packet_init); 4145 module_exit(packet_exit); 4146 MODULE_LICENSE("GPL"); 4147 MODULE_ALIAS_NETPROTO(PF_PACKET); 4148
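
/*
 * Illustrative sketch only, not part of the kernel build: the usual userspace
 * sequence that exercises the interfaces implemented above -- packet_create()
 * via socket(2), packet_bind() via bind(2), packet_setsockopt(PACKET_RX_RING)
 * and packet_mmap().  The interface name and ring geometry are arbitrary
 * example values; userspace needs the usual headers (<sys/socket.h>,
 * <linux/if_packet.h>, <linux/if_ether.h>, <net/if.h>, <sys/mman.h>).  See
 * Documentation/networking/packet_mmap.txt for the full description.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size	= 4096,		// multiple of PAGE_SIZE
 *		.tp_frame_size	= 2048,		// TPACKET_ALIGNMENT aligned
 *		.tp_block_nr	= 64,
 *		.tp_frame_nr	= 128,		// frames_per_block * tp_block_nr
 *	};
 *	struct sockaddr_ll ll = {
 *		.sll_family	= AF_PACKET,
 *		.sll_protocol	= htons(ETH_P_ALL),
 *		.sll_ifindex	= if_nametoindex("eth0"),
 *	};
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	void *ring;
 *
 *	bind(fd, (struct sockaddr *)&ll, sizeof(ll));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Each frame in the mapping starts with a struct tpacket_hdr (or
 * tpacket2_hdr/tpacket3_hdr, depending on PACKET_VERSION); tpacket_rcv()
 * hands a filled frame to userspace by setting TP_STATUS_USER (plus flag
 * bits) in tp_status, and userspace returns it by writing TP_STATUS_KERNEL
 * back.
 */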