// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman :	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov :	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header will
     not fit in the reserved space (tunnels); others are silly (PPP).
   - A packet socket receives packets with the ll header already pulled,
     so SOCK_RAW has to push it back.

   On receive:
   -----------

   Incoming, dev->hard_header!=NULL
     mac_header -> ll header
     data       -> data

   Outgoing, dev->hard_header!=NULL
     mac_header -> ll header
     data       -> ll header

   Incoming, dev->hard_header==NULL
     mac_header -> UNKNOWN position. It very likely points to the ll header.
		   PPP does this, which is wrong, because it introduces
		   asymmetry between the rx and tx paths.
     data       -> data

   Outgoing, dev->hard_header==NULL
     mac_header -> data. ll header is still not built!
     data       -> data

   Summary:
     If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


   On transmit:
   ------------

   dev->hard_header != NULL
     mac_header -> ll header
     data       -> ll header

   dev->hard_header == NULL (ll header is added by device, we cannot control it)
     mac_header -> data
     data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct
sk_buff *skb); 207 208 struct packet_skb_cb { 209 union { 210 struct sockaddr_pkt pkt; 211 union { 212 /* Trick: alias skb original length with 213 * ll.sll_family and ll.protocol in order 214 * to save room. 215 */ 216 unsigned int origlen; 217 struct sockaddr_ll ll; 218 }; 219 } sa; 220 }; 221 222 #define vio_le() virtio_legacy_is_little_endian() 223 224 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 225 226 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 227 #define GET_PBLOCK_DESC(x, bid) \ 228 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 229 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 230 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 231 #define GET_NEXT_PRB_BLK_NUM(x) \ 232 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 233 ((x)->kactive_blk_num+1) : 0) 234 235 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 236 static void __fanout_link(struct sock *sk, struct packet_sock *po); 237 238 static int packet_direct_xmit(struct sk_buff *skb) 239 { 240 return dev_direct_xmit(skb, packet_pick_tx_queue(skb)); 241 } 242 243 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 244 { 245 struct net_device *dev; 246 247 rcu_read_lock(); 248 dev = rcu_dereference(po->cached_dev); 249 if (likely(dev)) 250 dev_hold(dev); 251 rcu_read_unlock(); 252 253 return dev; 254 } 255 256 static void packet_cached_dev_assign(struct packet_sock *po, 257 struct net_device *dev) 258 { 259 rcu_assign_pointer(po->cached_dev, dev); 260 } 261 262 static void packet_cached_dev_reset(struct packet_sock *po) 263 { 264 RCU_INIT_POINTER(po->cached_dev, NULL); 265 } 266 267 static bool packet_use_direct_xmit(const struct packet_sock *po) 268 { 269 return po->xmit == packet_direct_xmit; 270 } 271 272 static u16 packet_pick_tx_queue(struct sk_buff *skb) 273 { 274 struct net_device *dev = skb->dev; 275 const struct net_device_ops *ops = dev->netdev_ops; 276 int cpu = raw_smp_processor_id(); 277 u16 queue_index; 278 279 #ifdef CONFIG_XPS 280 skb->sender_cpu = cpu + 1; 281 #endif 282 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues); 283 if (ops->ndo_select_queue) { 284 queue_index = ops->ndo_select_queue(dev, skb, NULL); 285 queue_index = netdev_cap_txqueue(dev, queue_index); 286 } else { 287 queue_index = netdev_pick_tx(dev, skb, NULL); 288 } 289 290 return queue_index; 291 } 292 293 /* __register_prot_hook must be invoked through register_prot_hook 294 * or from a context in which asynchronous accesses to the packet 295 * socket is not possible (packet_create()). 296 */ 297 static void __register_prot_hook(struct sock *sk) 298 { 299 struct packet_sock *po = pkt_sk(sk); 300 301 if (!po->running) { 302 if (po->fanout) 303 __fanout_link(sk, po); 304 else 305 dev_add_pack(&po->prot_hook); 306 307 sock_hold(sk); 308 po->running = 1; 309 } 310 } 311 312 static void register_prot_hook(struct sock *sk) 313 { 314 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); 315 __register_prot_hook(sk); 316 } 317 318 /* If the sync parameter is true, we will temporarily drop 319 * the po->bind_lock and do a synchronize_net to make sure no 320 * asynchronous packet processing paths still refer to the elements 321 * of po->prot_hook. If the sync parameter is false, it is the 322 * callers responsibility to take care of this. 
323 */ 324 static void __unregister_prot_hook(struct sock *sk, bool sync) 325 { 326 struct packet_sock *po = pkt_sk(sk); 327 328 lockdep_assert_held_once(&po->bind_lock); 329 330 po->running = 0; 331 332 if (po->fanout) 333 __fanout_unlink(sk, po); 334 else 335 __dev_remove_pack(&po->prot_hook); 336 337 __sock_put(sk); 338 339 if (sync) { 340 spin_unlock(&po->bind_lock); 341 synchronize_net(); 342 spin_lock(&po->bind_lock); 343 } 344 } 345 346 static void unregister_prot_hook(struct sock *sk, bool sync) 347 { 348 struct packet_sock *po = pkt_sk(sk); 349 350 if (po->running) 351 __unregister_prot_hook(sk, sync); 352 } 353 354 static inline struct page * __pure pgv_to_page(void *addr) 355 { 356 if (is_vmalloc_addr(addr)) 357 return vmalloc_to_page(addr); 358 return virt_to_page(addr); 359 } 360 361 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 362 { 363 union tpacket_uhdr h; 364 365 h.raw = frame; 366 switch (po->tp_version) { 367 case TPACKET_V1: 368 h.h1->tp_status = status; 369 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 370 break; 371 case TPACKET_V2: 372 h.h2->tp_status = status; 373 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 374 break; 375 case TPACKET_V3: 376 h.h3->tp_status = status; 377 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 378 break; 379 default: 380 WARN(1, "TPACKET version not supported.\n"); 381 BUG(); 382 } 383 384 smp_wmb(); 385 } 386 387 static int __packet_get_status(const struct packet_sock *po, void *frame) 388 { 389 union tpacket_uhdr h; 390 391 smp_rmb(); 392 393 h.raw = frame; 394 switch (po->tp_version) { 395 case TPACKET_V1: 396 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 397 return h.h1->tp_status; 398 case TPACKET_V2: 399 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 400 return h.h2->tp_status; 401 case TPACKET_V3: 402 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 403 return h.h3->tp_status; 404 default: 405 WARN(1, "TPACKET version not supported.\n"); 406 BUG(); 407 return 0; 408 } 409 } 410 411 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts, 412 unsigned int flags) 413 { 414 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 415 416 if (shhwtstamps && 417 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 418 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts)) 419 return TP_STATUS_TS_RAW_HARDWARE; 420 421 if (ktime_to_timespec64_cond(skb->tstamp, ts)) 422 return TP_STATUS_TS_SOFTWARE; 423 424 return 0; 425 } 426 427 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 428 struct sk_buff *skb) 429 { 430 union tpacket_uhdr h; 431 struct timespec64 ts; 432 __u32 ts_status; 433 434 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 435 return 0; 436 437 h.raw = frame; 438 /* 439 * versions 1 through 3 overflow the timestamps in y2106, since they 440 * all store the seconds in a 32-bit unsigned integer. 441 * If we create a version 4, that should have a 64-bit timestamp, 442 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit 443 * nanoseconds. 
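	 * (For reference: an unsigned 32-bit seconds counter starting at the
	 * 1970 epoch covers 2^32 s, roughly 136 years, which is where the
	 * early-2106 wraparound comes from.)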
444 */ 445 switch (po->tp_version) { 446 case TPACKET_V1: 447 h.h1->tp_sec = ts.tv_sec; 448 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 449 break; 450 case TPACKET_V2: 451 h.h2->tp_sec = ts.tv_sec; 452 h.h2->tp_nsec = ts.tv_nsec; 453 break; 454 case TPACKET_V3: 455 h.h3->tp_sec = ts.tv_sec; 456 h.h3->tp_nsec = ts.tv_nsec; 457 break; 458 default: 459 WARN(1, "TPACKET version not supported.\n"); 460 BUG(); 461 } 462 463 /* one flush is safe, as both fields always lie on the same cacheline */ 464 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 465 smp_wmb(); 466 467 return ts_status; 468 } 469 470 static void *packet_lookup_frame(const struct packet_sock *po, 471 const struct packet_ring_buffer *rb, 472 unsigned int position, 473 int status) 474 { 475 unsigned int pg_vec_pos, frame_offset; 476 union tpacket_uhdr h; 477 478 pg_vec_pos = position / rb->frames_per_block; 479 frame_offset = position % rb->frames_per_block; 480 481 h.raw = rb->pg_vec[pg_vec_pos].buffer + 482 (frame_offset * rb->frame_size); 483 484 if (status != __packet_get_status(po, h.raw)) 485 return NULL; 486 487 return h.raw; 488 } 489 490 static void *packet_current_frame(struct packet_sock *po, 491 struct packet_ring_buffer *rb, 492 int status) 493 { 494 return packet_lookup_frame(po, rb, rb->head, status); 495 } 496 497 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 498 { 499 del_timer_sync(&pkc->retire_blk_timer); 500 } 501 502 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 503 struct sk_buff_head *rb_queue) 504 { 505 struct tpacket_kbdq_core *pkc; 506 507 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 508 509 spin_lock_bh(&rb_queue->lock); 510 pkc->delete_blk_timer = 1; 511 spin_unlock_bh(&rb_queue->lock); 512 513 prb_del_retire_blk_timer(pkc); 514 } 515 516 static void prb_setup_retire_blk_timer(struct packet_sock *po) 517 { 518 struct tpacket_kbdq_core *pkc; 519 520 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 521 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired, 522 0); 523 pkc->retire_blk_timer.expires = jiffies; 524 } 525 526 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 527 int blk_size_in_bytes) 528 { 529 struct net_device *dev; 530 unsigned int mbits, div; 531 struct ethtool_link_ksettings ecmd; 532 int err; 533 534 rtnl_lock(); 535 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 536 if (unlikely(!dev)) { 537 rtnl_unlock(); 538 return DEFAULT_PRB_RETIRE_TOV; 539 } 540 err = __ethtool_get_link_ksettings(dev, &ecmd); 541 rtnl_unlock(); 542 if (err) 543 return DEFAULT_PRB_RETIRE_TOV; 544 545 /* If the link speed is so slow you don't really 546 * need to worry about perf anyways 547 */ 548 if (ecmd.base.speed < SPEED_1000 || 549 ecmd.base.speed == SPEED_UNKNOWN) 550 return DEFAULT_PRB_RETIRE_TOV; 551 552 div = ecmd.base.speed / 1000; 553 mbits = (blk_size_in_bytes * 8) / (1024 * 1024); 554 555 if (div) 556 mbits /= div; 557 558 if (div) 559 return mbits + 1; 560 return mbits; 561 } 562 563 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 564 union tpacket_req_u *req_u) 565 { 566 p1->feature_req_word = req_u->req3.tp_feature_req_word; 567 } 568 569 static void init_prb_bdqc(struct packet_sock *po, 570 struct packet_ring_buffer *rb, 571 struct pgv *pg_vec, 572 union tpacket_req_u *req_u) 573 { 574 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 575 struct tpacket_block_desc *pbd; 576 577 memset(p1, 0x0, sizeof(*p1)); 578 579 p1->knxt_seq_num = 1; 580 p1->pkbdq = pg_vec; 581 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 582 
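	/* Each pg_vec[] entry is one TPACKET_V3 block, and each block starts
	 * with the struct tpacket_block_desc used for the kernel/user handoff.
	 * The assignments below seed the block-rotation state before block 0
	 * is opened at the end of this function.
	 */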
p1->pkblk_start = pg_vec[0].buffer; 583 p1->kblk_size = req_u->req3.tp_block_size; 584 p1->knum_blocks = req_u->req3.tp_block_nr; 585 p1->hdrlen = po->tp_hdrlen; 586 p1->version = po->tp_version; 587 p1->last_kactive_blk_num = 0; 588 po->stats.stats3.tp_freeze_q_cnt = 0; 589 if (req_u->req3.tp_retire_blk_tov) 590 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; 591 else 592 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, 593 req_u->req3.tp_block_size); 594 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); 595 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 596 rwlock_init(&p1->blk_fill_in_prog_lock); 597 598 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 599 prb_init_ft_ops(p1, req_u); 600 prb_setup_retire_blk_timer(po); 601 prb_open_block(p1, pbd); 602 } 603 604 /* Do NOT update the last_blk_num first. 605 * Assumes sk_buff_head lock is held. 606 */ 607 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) 608 { 609 mod_timer(&pkc->retire_blk_timer, 610 jiffies + pkc->tov_in_jiffies); 611 pkc->last_kactive_blk_num = pkc->kactive_blk_num; 612 } 613 614 /* 615 * Timer logic: 616 * 1) We refresh the timer only when we open a block. 617 * By doing this we don't waste cycles refreshing the timer 618 * on packet-by-packet basis. 619 * 620 * With a 1MB block-size, on a 1Gbps line, it will take 621 * i) ~8 ms to fill a block + ii) memcpy etc. 622 * In this cut we are not accounting for the memcpy time. 623 * 624 * So, if the user sets the 'tmo' to 10ms then the timer 625 * will never fire while the block is still getting filled 626 * (which is what we want). However, the user could choose 627 * to close a block early and that's fine. 628 * 629 * But when the timer does fire, we check whether or not to refresh it. 630 * Since the tmo granularity is in msecs, it is not too expensive 631 * to refresh the timer, lets say every '8' msecs. 632 * Either the user can set the 'tmo' or we can derive it based on 633 * a) line-speed and b) block-size. 634 * prb_calc_retire_blk_tmo() calculates the tmo. 635 * 636 */ 637 static void prb_retire_rx_blk_timer_expired(struct timer_list *t) 638 { 639 struct packet_sock *po = 640 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); 641 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 642 unsigned int frozen; 643 struct tpacket_block_desc *pbd; 644 645 spin_lock(&po->sk.sk_receive_queue.lock); 646 647 frozen = prb_queue_frozen(pkc); 648 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 649 650 if (unlikely(pkc->delete_blk_timer)) 651 goto out; 652 653 /* We only need to plug the race when the block is partially filled. 654 * tpacket_rcv: 655 * lock(); increment BLOCK_NUM_PKTS; unlock() 656 * copy_bits() is in progress ... 657 * timer fires on other cpu: 658 * we can't retire the current block because copy_bits 659 * is in progress. 660 * 661 */ 662 if (BLOCK_NUM_PKTS(pbd)) { 663 /* Waiting for skb_copy_bits to finish... */ 664 write_lock(&pkc->blk_fill_in_prog_lock); 665 write_unlock(&pkc->blk_fill_in_prog_lock); 666 } 667 668 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 669 if (!frozen) { 670 if (!BLOCK_NUM_PKTS(pbd)) { 671 /* An empty block. Just refresh the timer. */ 672 goto refresh_timer; 673 } 674 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 675 if (!prb_dispatch_next_block(pkc, po)) 676 goto refresh_timer; 677 else 678 goto out; 679 } else { 680 /* Case 1. Queue was frozen because user-space was 681 * lagging behind. 
682 */ 683 if (prb_curr_blk_in_use(pbd)) { 684 /* 685 * Ok, user-space is still behind. 686 * So just refresh the timer. 687 */ 688 goto refresh_timer; 689 } else { 690 /* Case 2. queue was frozen,user-space caught up, 691 * now the link went idle && the timer fired. 692 * We don't have a block to close.So we open this 693 * block and restart the timer. 694 * opening a block thaws the queue,restarts timer 695 * Thawing/timer-refresh is a side effect. 696 */ 697 prb_open_block(pkc, pbd); 698 goto out; 699 } 700 } 701 } 702 703 refresh_timer: 704 _prb_refresh_rx_retire_blk_timer(pkc); 705 706 out: 707 spin_unlock(&po->sk.sk_receive_queue.lock); 708 } 709 710 static void prb_flush_block(struct tpacket_kbdq_core *pkc1, 711 struct tpacket_block_desc *pbd1, __u32 status) 712 { 713 /* Flush everything minus the block header */ 714 715 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 716 u8 *start, *end; 717 718 start = (u8 *)pbd1; 719 720 /* Skip the block header(we know header WILL fit in 4K) */ 721 start += PAGE_SIZE; 722 723 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); 724 for (; start < end; start += PAGE_SIZE) 725 flush_dcache_page(pgv_to_page(start)); 726 727 smp_wmb(); 728 #endif 729 730 /* Now update the block status. */ 731 732 BLOCK_STATUS(pbd1) = status; 733 734 /* Flush the block header */ 735 736 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 737 start = (u8 *)pbd1; 738 flush_dcache_page(pgv_to_page(start)); 739 740 smp_wmb(); 741 #endif 742 } 743 744 /* 745 * Side effect: 746 * 747 * 1) flush the block 748 * 2) Increment active_blk_num 749 * 750 * Note:We DONT refresh the timer on purpose. 751 * Because almost always the next block will be opened. 752 */ 753 static void prb_close_block(struct tpacket_kbdq_core *pkc1, 754 struct tpacket_block_desc *pbd1, 755 struct packet_sock *po, unsigned int stat) 756 { 757 __u32 status = TP_STATUS_USER | stat; 758 759 struct tpacket3_hdr *last_pkt; 760 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 761 struct sock *sk = &po->sk; 762 763 if (atomic_read(&po->tp_drops)) 764 status |= TP_STATUS_LOSING; 765 766 last_pkt = (struct tpacket3_hdr *)pkc1->prev; 767 last_pkt->tp_next_offset = 0; 768 769 /* Get the ts of the last pkt */ 770 if (BLOCK_NUM_PKTS(pbd1)) { 771 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 772 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 773 } else { 774 /* Ok, we tmo'd - so get the current time. 775 * 776 * It shouldn't really happen as we don't close empty 777 * blocks. See prb_retire_rx_blk_timer_expired(). 778 */ 779 struct timespec64 ts; 780 ktime_get_real_ts64(&ts); 781 h1->ts_last_pkt.ts_sec = ts.tv_sec; 782 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; 783 } 784 785 smp_wmb(); 786 787 /* Flush the block */ 788 prb_flush_block(pkc1, pbd1, status); 789 790 sk->sk_data_ready(sk); 791 792 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 793 } 794 795 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) 796 { 797 pkc->reset_pending_on_curr_blk = 0; 798 } 799 800 /* 801 * Side effect of opening a block: 802 * 803 * 1) prb_queue is thawed. 804 * 2) retire_blk_timer is refreshed. 
805 * 806 */ 807 static void prb_open_block(struct tpacket_kbdq_core *pkc1, 808 struct tpacket_block_desc *pbd1) 809 { 810 struct timespec64 ts; 811 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 812 813 smp_rmb(); 814 815 /* We could have just memset this but we will lose the 816 * flexibility of making the priv area sticky 817 */ 818 819 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; 820 BLOCK_NUM_PKTS(pbd1) = 0; 821 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 822 823 ktime_get_real_ts64(&ts); 824 825 h1->ts_first_pkt.ts_sec = ts.tv_sec; 826 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 827 828 pkc1->pkblk_start = (char *)pbd1; 829 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 830 831 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 832 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 833 834 pbd1->version = pkc1->version; 835 pkc1->prev = pkc1->nxt_offset; 836 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; 837 838 prb_thaw_queue(pkc1); 839 _prb_refresh_rx_retire_blk_timer(pkc1); 840 841 smp_wmb(); 842 } 843 844 /* 845 * Queue freeze logic: 846 * 1) Assume tp_block_nr = 8 blocks. 847 * 2) At time 't0', user opens Rx ring. 848 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 849 * 4) user-space is either sleeping or processing block '0'. 850 * 5) tpacket_rcv is currently filling block '7', since there is no space left, 851 * it will close block-7,loop around and try to fill block '0'. 852 * call-flow: 853 * __packet_lookup_frame_in_block 854 * prb_retire_current_block() 855 * prb_dispatch_next_block() 856 * |->(BLOCK_STATUS == USER) evaluates to true 857 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 858 * 6) Now there are two cases: 859 * 6.1) Link goes idle right after the queue is frozen. 860 * But remember, the last open_block() refreshed the timer. 861 * When this timer expires,it will refresh itself so that we can 862 * re-open block-0 in near future. 863 * 6.2) Link is busy and keeps on receiving packets. This is a simple 864 * case and __packet_lookup_frame_in_block will check if block-0 865 * is free and can now be re-used. 866 */ 867 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 868 struct packet_sock *po) 869 { 870 pkc->reset_pending_on_curr_blk = 1; 871 po->stats.stats3.tp_freeze_q_cnt++; 872 } 873 874 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 875 876 /* 877 * If the next block is free then we will dispatch it 878 * and return a good offset. 879 * Else, we will freeze the queue. 880 * So, caller must check the return value. 881 */ 882 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 883 struct packet_sock *po) 884 { 885 struct tpacket_block_desc *pbd; 886 887 smp_rmb(); 888 889 /* 1. Get current block num */ 890 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 891 892 /* 2. If this block is currently in_use then freeze the queue */ 893 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 894 prb_freeze_queue(pkc, po); 895 return NULL; 896 } 897 898 /* 899 * 3. 900 * open this block and return the offset where the first packet 901 * needs to get stored. 
902 */ 903 prb_open_block(pkc, pbd); 904 return (void *)pkc->nxt_offset; 905 } 906 907 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 908 struct packet_sock *po, unsigned int status) 909 { 910 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 911 912 /* retire/close the current block */ 913 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 914 /* 915 * Plug the case where copy_bits() is in progress on 916 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 917 * have space to copy the pkt in the current block and 918 * called prb_retire_current_block() 919 * 920 * We don't need to worry about the TMO case because 921 * the timer-handler already handled this case. 922 */ 923 if (!(status & TP_STATUS_BLK_TMO)) { 924 /* Waiting for skb_copy_bits to finish... */ 925 write_lock(&pkc->blk_fill_in_prog_lock); 926 write_unlock(&pkc->blk_fill_in_prog_lock); 927 } 928 prb_close_block(pkc, pbd, po, status); 929 return; 930 } 931 } 932 933 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) 934 { 935 return TP_STATUS_USER & BLOCK_STATUS(pbd); 936 } 937 938 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 939 { 940 return pkc->reset_pending_on_curr_blk; 941 } 942 943 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 944 __releases(&pkc->blk_fill_in_prog_lock) 945 { 946 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 947 948 read_unlock(&pkc->blk_fill_in_prog_lock); 949 } 950 951 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 952 struct tpacket3_hdr *ppd) 953 { 954 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 955 } 956 957 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 958 struct tpacket3_hdr *ppd) 959 { 960 ppd->hv1.tp_rxhash = 0; 961 } 962 963 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 964 struct tpacket3_hdr *ppd) 965 { 966 if (skb_vlan_tag_present(pkc->skb)) { 967 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 968 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 969 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 970 } else { 971 ppd->hv1.tp_vlan_tci = 0; 972 ppd->hv1.tp_vlan_tpid = 0; 973 ppd->tp_status = TP_STATUS_AVAILABLE; 974 } 975 } 976 977 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 978 struct tpacket3_hdr *ppd) 979 { 980 ppd->hv1.tp_padding = 0; 981 prb_fill_vlan_info(pkc, ppd); 982 983 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 984 prb_fill_rxhash(pkc, ppd); 985 else 986 prb_clear_rxhash(pkc, ppd); 987 } 988 989 static void prb_fill_curr_block(char *curr, 990 struct tpacket_kbdq_core *pkc, 991 struct tpacket_block_desc *pbd, 992 unsigned int len) 993 __acquires(&pkc->blk_fill_in_prog_lock) 994 { 995 struct tpacket3_hdr *ppd; 996 997 ppd = (struct tpacket3_hdr *)curr; 998 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 999 pkc->prev = curr; 1000 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1001 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1002 BLOCK_NUM_PKTS(pbd) += 1; 1003 read_lock(&pkc->blk_fill_in_prog_lock); 1004 prb_run_all_ft_ops(pkc, ppd); 1005 } 1006 1007 /* Assumes caller has the sk->rx_queue.lock */ 1008 static void *__packet_lookup_frame_in_block(struct packet_sock *po, 1009 struct sk_buff *skb, 1010 unsigned int len 1011 ) 1012 { 1013 struct tpacket_kbdq_core *pkc; 1014 struct tpacket_block_desc *pbd; 1015 char *curr, *end; 1016 1017 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1018 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1019 1020 /* Queue is frozen when user space is lagging 
behind */ 1021 if (prb_queue_frozen(pkc)) { 1022 /* 1023 * Check if that last block which caused the queue to freeze, 1024 * is still in_use by user-space. 1025 */ 1026 if (prb_curr_blk_in_use(pbd)) { 1027 /* Can't record this packet */ 1028 return NULL; 1029 } else { 1030 /* 1031 * Ok, the block was released by user-space. 1032 * Now let's open that block. 1033 * opening a block also thaws the queue. 1034 * Thawing is a side effect. 1035 */ 1036 prb_open_block(pkc, pbd); 1037 } 1038 } 1039 1040 smp_mb(); 1041 curr = pkc->nxt_offset; 1042 pkc->skb = skb; 1043 end = (char *)pbd + pkc->kblk_size; 1044 1045 /* first try the current block */ 1046 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1047 prb_fill_curr_block(curr, pkc, pbd, len); 1048 return (void *)curr; 1049 } 1050 1051 /* Ok, close the current block */ 1052 prb_retire_current_block(pkc, po, 0); 1053 1054 /* Now, try to dispatch the next block */ 1055 curr = (char *)prb_dispatch_next_block(pkc, po); 1056 if (curr) { 1057 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1058 prb_fill_curr_block(curr, pkc, pbd, len); 1059 return (void *)curr; 1060 } 1061 1062 /* 1063 * No free blocks are available.user_space hasn't caught up yet. 1064 * Queue was just frozen and now this packet will get dropped. 1065 */ 1066 return NULL; 1067 } 1068 1069 static void *packet_current_rx_frame(struct packet_sock *po, 1070 struct sk_buff *skb, 1071 int status, unsigned int len) 1072 { 1073 char *curr = NULL; 1074 switch (po->tp_version) { 1075 case TPACKET_V1: 1076 case TPACKET_V2: 1077 curr = packet_lookup_frame(po, &po->rx_ring, 1078 po->rx_ring.head, status); 1079 return curr; 1080 case TPACKET_V3: 1081 return __packet_lookup_frame_in_block(po, skb, len); 1082 default: 1083 WARN(1, "TPACKET version not supported\n"); 1084 BUG(); 1085 return NULL; 1086 } 1087 } 1088 1089 static void *prb_lookup_block(const struct packet_sock *po, 1090 const struct packet_ring_buffer *rb, 1091 unsigned int idx, 1092 int status) 1093 { 1094 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1095 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1096 1097 if (status != BLOCK_STATUS(pbd)) 1098 return NULL; 1099 return pbd; 1100 } 1101 1102 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1103 { 1104 unsigned int prev; 1105 if (rb->prb_bdqc.kactive_blk_num) 1106 prev = rb->prb_bdqc.kactive_blk_num-1; 1107 else 1108 prev = rb->prb_bdqc.knum_blocks-1; 1109 return prev; 1110 } 1111 1112 /* Assumes caller has held the rx_queue.lock */ 1113 static void *__prb_previous_block(struct packet_sock *po, 1114 struct packet_ring_buffer *rb, 1115 int status) 1116 { 1117 unsigned int previous = prb_previous_blk_num(rb); 1118 return prb_lookup_block(po, rb, previous, status); 1119 } 1120 1121 static void *packet_previous_rx_frame(struct packet_sock *po, 1122 struct packet_ring_buffer *rb, 1123 int status) 1124 { 1125 if (po->tp_version <= TPACKET_V2) 1126 return packet_previous_frame(po, rb, status); 1127 1128 return __prb_previous_block(po, rb, status); 1129 } 1130 1131 static void packet_increment_rx_head(struct packet_sock *po, 1132 struct packet_ring_buffer *rb) 1133 { 1134 switch (po->tp_version) { 1135 case TPACKET_V1: 1136 case TPACKET_V2: 1137 return packet_increment_head(rb); 1138 case TPACKET_V3: 1139 default: 1140 WARN(1, "TPACKET version not supported.\n"); 1141 BUG(); 1142 return; 1143 } 1144 } 1145 1146 static void *packet_previous_frame(struct packet_sock *po, 1147 struct packet_ring_buffer *rb, 1148 int status) 1149 { 1150 unsigned int previous = 
rb->head ? rb->head - 1 : rb->frame_max; 1151 return packet_lookup_frame(po, rb, previous, status); 1152 } 1153 1154 static void packet_increment_head(struct packet_ring_buffer *buff) 1155 { 1156 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1157 } 1158 1159 static void packet_inc_pending(struct packet_ring_buffer *rb) 1160 { 1161 this_cpu_inc(*rb->pending_refcnt); 1162 } 1163 1164 static void packet_dec_pending(struct packet_ring_buffer *rb) 1165 { 1166 this_cpu_dec(*rb->pending_refcnt); 1167 } 1168 1169 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1170 { 1171 unsigned int refcnt = 0; 1172 int cpu; 1173 1174 /* We don't use pending refcount in rx_ring. */ 1175 if (rb->pending_refcnt == NULL) 1176 return 0; 1177 1178 for_each_possible_cpu(cpu) 1179 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1180 1181 return refcnt; 1182 } 1183 1184 static int packet_alloc_pending(struct packet_sock *po) 1185 { 1186 po->rx_ring.pending_refcnt = NULL; 1187 1188 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1189 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1190 return -ENOBUFS; 1191 1192 return 0; 1193 } 1194 1195 static void packet_free_pending(struct packet_sock *po) 1196 { 1197 free_percpu(po->tx_ring.pending_refcnt); 1198 } 1199 1200 #define ROOM_POW_OFF 2 1201 #define ROOM_NONE 0x0 1202 #define ROOM_LOW 0x1 1203 #define ROOM_NORMAL 0x2 1204 1205 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off) 1206 { 1207 int idx, len; 1208 1209 len = READ_ONCE(po->rx_ring.frame_max) + 1; 1210 idx = READ_ONCE(po->rx_ring.head); 1211 if (pow_off) 1212 idx += len >> pow_off; 1213 if (idx >= len) 1214 idx -= len; 1215 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1216 } 1217 1218 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) 1219 { 1220 int idx, len; 1221 1222 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); 1223 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); 1224 if (pow_off) 1225 idx += len >> pow_off; 1226 if (idx >= len) 1227 idx -= len; 1228 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1229 } 1230 1231 static int __packet_rcv_has_room(const struct packet_sock *po, 1232 const struct sk_buff *skb) 1233 { 1234 const struct sock *sk = &po->sk; 1235 int ret = ROOM_NONE; 1236 1237 if (po->prot_hook.func != tpacket_rcv) { 1238 int rcvbuf = READ_ONCE(sk->sk_rcvbuf); 1239 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc) 1240 - (skb ? 
skb->truesize : 0); 1241 1242 if (avail > (rcvbuf >> ROOM_POW_OFF)) 1243 return ROOM_NORMAL; 1244 else if (avail > 0) 1245 return ROOM_LOW; 1246 else 1247 return ROOM_NONE; 1248 } 1249 1250 if (po->tp_version == TPACKET_V3) { 1251 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) 1252 ret = ROOM_NORMAL; 1253 else if (__tpacket_v3_has_room(po, 0)) 1254 ret = ROOM_LOW; 1255 } else { 1256 if (__tpacket_has_room(po, ROOM_POW_OFF)) 1257 ret = ROOM_NORMAL; 1258 else if (__tpacket_has_room(po, 0)) 1259 ret = ROOM_LOW; 1260 } 1261 1262 return ret; 1263 } 1264 1265 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1266 { 1267 int pressure, ret; 1268 1269 ret = __packet_rcv_has_room(po, skb); 1270 pressure = ret != ROOM_NORMAL; 1271 1272 if (READ_ONCE(po->pressure) != pressure) 1273 WRITE_ONCE(po->pressure, pressure); 1274 1275 return ret; 1276 } 1277 1278 static void packet_rcv_try_clear_pressure(struct packet_sock *po) 1279 { 1280 if (READ_ONCE(po->pressure) && 1281 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 1282 WRITE_ONCE(po->pressure, 0); 1283 } 1284 1285 static void packet_sock_destruct(struct sock *sk) 1286 { 1287 skb_queue_purge(&sk->sk_error_queue); 1288 1289 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1290 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); 1291 1292 if (!sock_flag(sk, SOCK_DEAD)) { 1293 pr_err("Attempt to release alive packet socket: %p\n", sk); 1294 return; 1295 } 1296 1297 sk_refcnt_debug_dec(sk); 1298 } 1299 1300 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) 1301 { 1302 u32 *history = po->rollover->history; 1303 u32 victim, rxhash; 1304 int i, count = 0; 1305 1306 rxhash = skb_get_hash(skb); 1307 for (i = 0; i < ROLLOVER_HLEN; i++) 1308 if (READ_ONCE(history[i]) == rxhash) 1309 count++; 1310 1311 victim = prandom_u32() % ROLLOVER_HLEN; 1312 1313 /* Avoid dirtying the cache line if possible */ 1314 if (READ_ONCE(history[victim]) != rxhash) 1315 WRITE_ONCE(history[victim], rxhash); 1316 1317 return count > (ROLLOVER_HLEN >> 1); 1318 } 1319 1320 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1321 struct sk_buff *skb, 1322 unsigned int num) 1323 { 1324 return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1325 } 1326 1327 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1328 struct sk_buff *skb, 1329 unsigned int num) 1330 { 1331 unsigned int val = atomic_inc_return(&f->rr_cur); 1332 1333 return val % num; 1334 } 1335 1336 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1337 struct sk_buff *skb, 1338 unsigned int num) 1339 { 1340 return smp_processor_id() % num; 1341 } 1342 1343 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1344 struct sk_buff *skb, 1345 unsigned int num) 1346 { 1347 return prandom_u32_max(num); 1348 } 1349 1350 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1351 struct sk_buff *skb, 1352 unsigned int idx, bool try_self, 1353 unsigned int num) 1354 { 1355 struct packet_sock *po, *po_next, *po_skip = NULL; 1356 unsigned int i, j, room = ROOM_NONE; 1357 1358 po = pkt_sk(f->arr[idx]); 1359 1360 if (try_self) { 1361 room = packet_rcv_has_room(po, skb); 1362 if (room == ROOM_NORMAL || 1363 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) 1364 return idx; 1365 po_skip = po; 1366 } 1367 1368 i = j = min_t(int, po->rollover->sock, num - 1); 1369 do { 1370 po_next = pkt_sk(f->arr[i]); 1371 if (po_next != po_skip && !READ_ONCE(po_next->pressure) && 1372 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { 1373 if (i != j) 1374 
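				/* remember the slot we rolled over to, so the
				 * next search starts from here
				 */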
po->rollover->sock = i; 1375 atomic_long_inc(&po->rollover->num); 1376 if (room == ROOM_LOW) 1377 atomic_long_inc(&po->rollover->num_huge); 1378 return i; 1379 } 1380 1381 if (++i == num) 1382 i = 0; 1383 } while (i != j); 1384 1385 atomic_long_inc(&po->rollover->num_failed); 1386 return idx; 1387 } 1388 1389 static unsigned int fanout_demux_qm(struct packet_fanout *f, 1390 struct sk_buff *skb, 1391 unsigned int num) 1392 { 1393 return skb_get_queue_mapping(skb) % num; 1394 } 1395 1396 static unsigned int fanout_demux_bpf(struct packet_fanout *f, 1397 struct sk_buff *skb, 1398 unsigned int num) 1399 { 1400 struct bpf_prog *prog; 1401 unsigned int ret = 0; 1402 1403 rcu_read_lock(); 1404 prog = rcu_dereference(f->bpf_prog); 1405 if (prog) 1406 ret = bpf_prog_run_clear_cb(prog, skb) % num; 1407 rcu_read_unlock(); 1408 1409 return ret; 1410 } 1411 1412 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1413 { 1414 return f->flags & (flag >> 8); 1415 } 1416 1417 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1418 struct packet_type *pt, struct net_device *orig_dev) 1419 { 1420 struct packet_fanout *f = pt->af_packet_priv; 1421 unsigned int num = READ_ONCE(f->num_members); 1422 struct net *net = read_pnet(&f->net); 1423 struct packet_sock *po; 1424 unsigned int idx; 1425 1426 if (!net_eq(dev_net(dev), net) || !num) { 1427 kfree_skb(skb); 1428 return 0; 1429 } 1430 1431 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1432 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); 1433 if (!skb) 1434 return 0; 1435 } 1436 switch (f->type) { 1437 case PACKET_FANOUT_HASH: 1438 default: 1439 idx = fanout_demux_hash(f, skb, num); 1440 break; 1441 case PACKET_FANOUT_LB: 1442 idx = fanout_demux_lb(f, skb, num); 1443 break; 1444 case PACKET_FANOUT_CPU: 1445 idx = fanout_demux_cpu(f, skb, num); 1446 break; 1447 case PACKET_FANOUT_RND: 1448 idx = fanout_demux_rnd(f, skb, num); 1449 break; 1450 case PACKET_FANOUT_QM: 1451 idx = fanout_demux_qm(f, skb, num); 1452 break; 1453 case PACKET_FANOUT_ROLLOVER: 1454 idx = fanout_demux_rollover(f, skb, 0, false, num); 1455 break; 1456 case PACKET_FANOUT_CBPF: 1457 case PACKET_FANOUT_EBPF: 1458 idx = fanout_demux_bpf(f, skb, num); 1459 break; 1460 } 1461 1462 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) 1463 idx = fanout_demux_rollover(f, skb, idx, true, num); 1464 1465 po = pkt_sk(f->arr[idx]); 1466 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1467 } 1468 1469 DEFINE_MUTEX(fanout_mutex); 1470 EXPORT_SYMBOL_GPL(fanout_mutex); 1471 static LIST_HEAD(fanout_list); 1472 static u16 fanout_next_id; 1473 1474 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1475 { 1476 struct packet_fanout *f = po->fanout; 1477 1478 spin_lock(&f->lock); 1479 f->arr[f->num_members] = sk; 1480 smp_wmb(); 1481 f->num_members++; 1482 if (f->num_members == 1) 1483 dev_add_pack(&f->prot_hook); 1484 spin_unlock(&f->lock); 1485 } 1486 1487 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1488 { 1489 struct packet_fanout *f = po->fanout; 1490 int i; 1491 1492 spin_lock(&f->lock); 1493 for (i = 0; i < f->num_members; i++) { 1494 if (f->arr[i] == sk) 1495 break; 1496 } 1497 BUG_ON(i >= f->num_members); 1498 f->arr[i] = f->arr[f->num_members - 1]; 1499 f->num_members--; 1500 if (f->num_members == 0) 1501 __dev_remove_pack(&f->prot_hook); 1502 spin_unlock(&f->lock); 1503 } 1504 1505 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1506 { 1507 if (sk->sk_family != PF_PACKET) 
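		/* only packet sockets can be members of a fanout group */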
1508 return false; 1509 1510 return ptype->af_packet_priv == pkt_sk(sk)->fanout; 1511 } 1512 1513 static void fanout_init_data(struct packet_fanout *f) 1514 { 1515 switch (f->type) { 1516 case PACKET_FANOUT_LB: 1517 atomic_set(&f->rr_cur, 0); 1518 break; 1519 case PACKET_FANOUT_CBPF: 1520 case PACKET_FANOUT_EBPF: 1521 RCU_INIT_POINTER(f->bpf_prog, NULL); 1522 break; 1523 } 1524 } 1525 1526 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) 1527 { 1528 struct bpf_prog *old; 1529 1530 spin_lock(&f->lock); 1531 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); 1532 rcu_assign_pointer(f->bpf_prog, new); 1533 spin_unlock(&f->lock); 1534 1535 if (old) { 1536 synchronize_net(); 1537 bpf_prog_destroy(old); 1538 } 1539 } 1540 1541 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, 1542 unsigned int len) 1543 { 1544 struct bpf_prog *new; 1545 struct sock_fprog fprog; 1546 int ret; 1547 1548 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1549 return -EPERM; 1550 1551 ret = copy_bpf_fprog_from_user(&fprog, data, len); 1552 if (ret) 1553 return ret; 1554 1555 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); 1556 if (ret) 1557 return ret; 1558 1559 __fanout_set_data_bpf(po->fanout, new); 1560 return 0; 1561 } 1562 1563 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, 1564 unsigned int len) 1565 { 1566 struct bpf_prog *new; 1567 u32 fd; 1568 1569 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1570 return -EPERM; 1571 if (len != sizeof(fd)) 1572 return -EINVAL; 1573 if (copy_from_sockptr(&fd, data, len)) 1574 return -EFAULT; 1575 1576 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 1577 if (IS_ERR(new)) 1578 return PTR_ERR(new); 1579 1580 __fanout_set_data_bpf(po->fanout, new); 1581 return 0; 1582 } 1583 1584 static int fanout_set_data(struct packet_sock *po, sockptr_t data, 1585 unsigned int len) 1586 { 1587 switch (po->fanout->type) { 1588 case PACKET_FANOUT_CBPF: 1589 return fanout_set_data_cbpf(po, data, len); 1590 case PACKET_FANOUT_EBPF: 1591 return fanout_set_data_ebpf(po, data, len); 1592 default: 1593 return -EINVAL; 1594 } 1595 } 1596 1597 static void fanout_release_data(struct packet_fanout *f) 1598 { 1599 switch (f->type) { 1600 case PACKET_FANOUT_CBPF: 1601 case PACKET_FANOUT_EBPF: 1602 __fanout_set_data_bpf(f, NULL); 1603 } 1604 } 1605 1606 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) 1607 { 1608 struct packet_fanout *f; 1609 1610 list_for_each_entry(f, &fanout_list, list) { 1611 if (f->id == candidate_id && 1612 read_pnet(&f->net) == sock_net(sk)) { 1613 return false; 1614 } 1615 } 1616 return true; 1617 } 1618 1619 static bool fanout_find_new_id(struct sock *sk, u16 *new_id) 1620 { 1621 u16 id = fanout_next_id; 1622 1623 do { 1624 if (__fanout_id_is_free(sk, id)) { 1625 *new_id = id; 1626 fanout_next_id = id + 1; 1627 return true; 1628 } 1629 1630 id++; 1631 } while (id != fanout_next_id); 1632 1633 return false; 1634 } 1635 1636 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1637 { 1638 struct packet_rollover *rollover = NULL; 1639 struct packet_sock *po = pkt_sk(sk); 1640 struct packet_fanout *f, *match; 1641 u8 type = type_flags & 0xff; 1642 u8 flags = type_flags >> 8; 1643 int err; 1644 1645 switch (type) { 1646 case PACKET_FANOUT_ROLLOVER: 1647 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1648 return -EINVAL; 1649 case PACKET_FANOUT_HASH: 1650 case PACKET_FANOUT_LB: 1651 case PACKET_FANOUT_CPU: 1652 case PACKET_FANOUT_RND: 1653 case 
PACKET_FANOUT_QM: 1654 case PACKET_FANOUT_CBPF: 1655 case PACKET_FANOUT_EBPF: 1656 break; 1657 default: 1658 return -EINVAL; 1659 } 1660 1661 mutex_lock(&fanout_mutex); 1662 1663 err = -EALREADY; 1664 if (po->fanout) 1665 goto out; 1666 1667 if (type == PACKET_FANOUT_ROLLOVER || 1668 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1669 err = -ENOMEM; 1670 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); 1671 if (!rollover) 1672 goto out; 1673 atomic_long_set(&rollover->num, 0); 1674 atomic_long_set(&rollover->num_huge, 0); 1675 atomic_long_set(&rollover->num_failed, 0); 1676 } 1677 1678 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { 1679 if (id != 0) { 1680 err = -EINVAL; 1681 goto out; 1682 } 1683 if (!fanout_find_new_id(sk, &id)) { 1684 err = -ENOMEM; 1685 goto out; 1686 } 1687 /* ephemeral flag for the first socket in the group: drop it */ 1688 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); 1689 } 1690 1691 match = NULL; 1692 list_for_each_entry(f, &fanout_list, list) { 1693 if (f->id == id && 1694 read_pnet(&f->net) == sock_net(sk)) { 1695 match = f; 1696 break; 1697 } 1698 } 1699 err = -EINVAL; 1700 if (match && match->flags != flags) 1701 goto out; 1702 if (!match) { 1703 err = -ENOMEM; 1704 match = kzalloc(sizeof(*match), GFP_KERNEL); 1705 if (!match) 1706 goto out; 1707 write_pnet(&match->net, sock_net(sk)); 1708 match->id = id; 1709 match->type = type; 1710 match->flags = flags; 1711 INIT_LIST_HEAD(&match->list); 1712 spin_lock_init(&match->lock); 1713 refcount_set(&match->sk_ref, 0); 1714 fanout_init_data(match); 1715 match->prot_hook.type = po->prot_hook.type; 1716 match->prot_hook.dev = po->prot_hook.dev; 1717 match->prot_hook.func = packet_rcv_fanout; 1718 match->prot_hook.af_packet_priv = match; 1719 match->prot_hook.id_match = match_fanout_group; 1720 list_add(&match->list, &fanout_list); 1721 } 1722 err = -EINVAL; 1723 1724 spin_lock(&po->bind_lock); 1725 if (po->running && 1726 match->type == type && 1727 match->prot_hook.type == po->prot_hook.type && 1728 match->prot_hook.dev == po->prot_hook.dev) { 1729 err = -ENOSPC; 1730 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { 1731 __dev_remove_pack(&po->prot_hook); 1732 po->fanout = match; 1733 po->rollover = rollover; 1734 rollover = NULL; 1735 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); 1736 __fanout_link(sk, po); 1737 err = 0; 1738 } 1739 } 1740 spin_unlock(&po->bind_lock); 1741 1742 if (err && !refcount_read(&match->sk_ref)) { 1743 list_del(&match->list); 1744 kfree(match); 1745 } 1746 1747 out: 1748 kfree(rollover); 1749 mutex_unlock(&fanout_mutex); 1750 return err; 1751 } 1752 1753 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes 1754 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 
1755 * It is the responsibility of the caller to call fanout_release_data() and 1756 * free the returned packet_fanout (after synchronize_net()) 1757 */ 1758 static struct packet_fanout *fanout_release(struct sock *sk) 1759 { 1760 struct packet_sock *po = pkt_sk(sk); 1761 struct packet_fanout *f; 1762 1763 mutex_lock(&fanout_mutex); 1764 f = po->fanout; 1765 if (f) { 1766 po->fanout = NULL; 1767 1768 if (refcount_dec_and_test(&f->sk_ref)) 1769 list_del(&f->list); 1770 else 1771 f = NULL; 1772 } 1773 mutex_unlock(&fanout_mutex); 1774 1775 return f; 1776 } 1777 1778 static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1779 struct sk_buff *skb) 1780 { 1781 /* Earlier code assumed this would be a VLAN pkt, double-check 1782 * this now that we have the actual packet in hand. We can only 1783 * do this check on Ethernet devices. 1784 */ 1785 if (unlikely(dev->type != ARPHRD_ETHER)) 1786 return false; 1787 1788 skb_reset_mac_header(skb); 1789 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); 1790 } 1791 1792 static const struct proto_ops packet_ops; 1793 1794 static const struct proto_ops packet_ops_spkt; 1795 1796 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1797 struct packet_type *pt, struct net_device *orig_dev) 1798 { 1799 struct sock *sk; 1800 struct sockaddr_pkt *spkt; 1801 1802 /* 1803 * When we registered the protocol we saved the socket in the data 1804 * field for just this event. 1805 */ 1806 1807 sk = pt->af_packet_priv; 1808 1809 /* 1810 * Yank back the headers [hope the device set this 1811 * right or kerboom...] 1812 * 1813 * Incoming packets have ll header pulled, 1814 * push it back. 1815 * 1816 * For outgoing ones skb->data == skb_mac_header(skb) 1817 * so that this procedure is noop. 1818 */ 1819 1820 if (skb->pkt_type == PACKET_LOOPBACK) 1821 goto out; 1822 1823 if (!net_eq(dev_net(dev), sock_net(sk))) 1824 goto out; 1825 1826 skb = skb_share_check(skb, GFP_ATOMIC); 1827 if (skb == NULL) 1828 goto oom; 1829 1830 /* drop any routing info */ 1831 skb_dst_drop(skb); 1832 1833 /* drop conntrack reference */ 1834 nf_reset_ct(skb); 1835 1836 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1837 1838 skb_push(skb, skb->data - skb_mac_header(skb)); 1839 1840 /* 1841 * The SOCK_PACKET socket receives _all_ frames. 1842 */ 1843 1844 spkt->spkt_family = dev->type; 1845 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1846 spkt->spkt_protocol = skb->protocol; 1847 1848 /* 1849 * Charge the memory to the socket. This is done specifically 1850 * to prevent sockets using all the memory up. 1851 */ 1852 1853 if (sock_queue_rcv_skb(sk, skb) == 0) 1854 return 0; 1855 1856 out: 1857 kfree_skb(skb); 1858 oom: 1859 return 0; 1860 } 1861 1862 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) 1863 { 1864 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && 1865 sock->type == SOCK_RAW) { 1866 skb_reset_mac_header(skb); 1867 skb->protocol = dev_parse_header_protocol(skb); 1868 } 1869 1870 skb_probe_transport_header(skb); 1871 } 1872 1873 /* 1874 * Output a raw packet to a device layer. 
This bypasses all the other 1875 * protocol layers and you must therefore supply it with a complete frame 1876 */ 1877 1878 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, 1879 size_t len) 1880 { 1881 struct sock *sk = sock->sk; 1882 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1883 struct sk_buff *skb = NULL; 1884 struct net_device *dev; 1885 struct sockcm_cookie sockc; 1886 __be16 proto = 0; 1887 int err; 1888 int extra_len = 0; 1889 1890 /* 1891 * Get and verify the address. 1892 */ 1893 1894 if (saddr) { 1895 if (msg->msg_namelen < sizeof(struct sockaddr)) 1896 return -EINVAL; 1897 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1898 proto = saddr->spkt_protocol; 1899 } else 1900 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1901 1902 /* 1903 * Find the device first to size check it 1904 */ 1905 1906 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1907 retry: 1908 rcu_read_lock(); 1909 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1910 err = -ENODEV; 1911 if (dev == NULL) 1912 goto out_unlock; 1913 1914 err = -ENETDOWN; 1915 if (!(dev->flags & IFF_UP)) 1916 goto out_unlock; 1917 1918 /* 1919 * You may not queue a frame bigger than the mtu. This is the lowest level 1920 * raw protocol and you must do your own fragmentation at this level. 1921 */ 1922 1923 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1924 if (!netif_supports_nofcs(dev)) { 1925 err = -EPROTONOSUPPORT; 1926 goto out_unlock; 1927 } 1928 extra_len = 4; /* We're doing our own CRC */ 1929 } 1930 1931 err = -EMSGSIZE; 1932 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 1933 goto out_unlock; 1934 1935 if (!skb) { 1936 size_t reserved = LL_RESERVED_SPACE(dev); 1937 int tlen = dev->needed_tailroom; 1938 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 1939 1940 rcu_read_unlock(); 1941 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 1942 if (skb == NULL) 1943 return -ENOBUFS; 1944 /* FIXME: Save some space for broken drivers that write a hard 1945 * header at transmission time by themselves. PPP is the notable 1946 * one here. This should really be fixed at the driver level. 
1947 */ 1948 skb_reserve(skb, reserved); 1949 skb_reset_network_header(skb); 1950 1951 /* Try to align data part correctly */ 1952 if (hhlen) { 1953 skb->data -= hhlen; 1954 skb->tail -= hhlen; 1955 if (len < hhlen) 1956 skb_reset_network_header(skb); 1957 } 1958 err = memcpy_from_msg(skb_put(skb, len), msg, len); 1959 if (err) 1960 goto out_free; 1961 goto retry; 1962 } 1963 1964 if (!dev_validate_header(dev, skb->data, len)) { 1965 err = -EINVAL; 1966 goto out_unlock; 1967 } 1968 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 1969 !packet_extra_vlan_len_allowed(dev, skb)) { 1970 err = -EMSGSIZE; 1971 goto out_unlock; 1972 } 1973 1974 sockcm_init(&sockc, sk); 1975 if (msg->msg_controllen) { 1976 err = sock_cmsg_send(sk, msg, &sockc); 1977 if (unlikely(err)) 1978 goto out_unlock; 1979 } 1980 1981 skb->protocol = proto; 1982 skb->dev = dev; 1983 skb->priority = sk->sk_priority; 1984 skb->mark = sk->sk_mark; 1985 skb->tstamp = sockc.transmit_time; 1986 1987 skb_setup_tx_timestamp(skb, sockc.tsflags); 1988 1989 if (unlikely(extra_len == 4)) 1990 skb->no_fcs = 1; 1991 1992 packet_parse_headers(skb, sock); 1993 1994 dev_queue_xmit(skb); 1995 rcu_read_unlock(); 1996 return len; 1997 1998 out_unlock: 1999 rcu_read_unlock(); 2000 out_free: 2001 kfree_skb(skb); 2002 return err; 2003 } 2004 2005 static unsigned int run_filter(struct sk_buff *skb, 2006 const struct sock *sk, 2007 unsigned int res) 2008 { 2009 struct sk_filter *filter; 2010 2011 rcu_read_lock(); 2012 filter = rcu_dereference(sk->sk_filter); 2013 if (filter != NULL) 2014 res = bpf_prog_run_clear_cb(filter->prog, skb); 2015 rcu_read_unlock(); 2016 2017 return res; 2018 } 2019 2020 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 2021 size_t *len) 2022 { 2023 struct virtio_net_hdr vnet_hdr; 2024 2025 if (*len < sizeof(vnet_hdr)) 2026 return -EINVAL; 2027 *len -= sizeof(vnet_hdr); 2028 2029 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) 2030 return -EINVAL; 2031 2032 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 2033 } 2034 2035 /* 2036 * This function makes lazy skb cloning in hope that most of packets 2037 * are discarded by BPF. 2038 * 2039 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 2040 * and skb->cb are mangled. It works because (and until) packets 2041 * falling here are owned by current CPU. Output packets are cloned 2042 * by dev_queue_xmit_nit(), input packets are processed by net_bh 2043 * sequencially, so that if we return skb to original state on exit, 2044 * we will not harm anyone. 2045 */ 2046 2047 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 2048 struct packet_type *pt, struct net_device *orig_dev) 2049 { 2050 struct sock *sk; 2051 struct sockaddr_ll *sll; 2052 struct packet_sock *po; 2053 u8 *skb_head = skb->data; 2054 int skb_len = skb->len; 2055 unsigned int snaplen, res; 2056 bool is_drop_n_account = false; 2057 2058 if (skb->pkt_type == PACKET_LOOPBACK) 2059 goto drop; 2060 2061 sk = pt->af_packet_priv; 2062 po = pkt_sk(sk); 2063 2064 if (!net_eq(dev_net(dev), sock_net(sk))) 2065 goto drop; 2066 2067 skb->dev = dev; 2068 2069 if (dev->header_ops) { 2070 /* The device has an explicit notion of ll header, 2071 * exported to higher levels. 2072 * 2073 * Otherwise, the device hides details of its frame 2074 * structure, so that corresponding packet head is 2075 * never delivered to user. 
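		 * (Concretely: for SOCK_RAW sockets the link-layer header is
		 * pushed back in front of the data below, while SOCK_DGRAM
		 * sockets are handed the packet without it.)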
2076 */ 2077 if (sk->sk_type != SOCK_DGRAM) 2078 skb_push(skb, skb->data - skb_mac_header(skb)); 2079 else if (skb->pkt_type == PACKET_OUTGOING) { 2080 /* Special case: outgoing packets have ll header at head */ 2081 skb_pull(skb, skb_network_offset(skb)); 2082 } 2083 } 2084 2085 snaplen = skb->len; 2086 2087 res = run_filter(skb, sk, snaplen); 2088 if (!res) 2089 goto drop_n_restore; 2090 if (snaplen > res) 2091 snaplen = res; 2092 2093 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2094 goto drop_n_acct; 2095 2096 if (skb_shared(skb)) { 2097 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2098 if (nskb == NULL) 2099 goto drop_n_acct; 2100 2101 if (skb_head != skb->data) { 2102 skb->data = skb_head; 2103 skb->len = skb_len; 2104 } 2105 consume_skb(skb); 2106 skb = nskb; 2107 } 2108 2109 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2110 2111 sll = &PACKET_SKB_CB(skb)->sa.ll; 2112 sll->sll_hatype = dev->type; 2113 sll->sll_pkttype = skb->pkt_type; 2114 if (unlikely(po->origdev)) 2115 sll->sll_ifindex = orig_dev->ifindex; 2116 else 2117 sll->sll_ifindex = dev->ifindex; 2118 2119 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2120 2121 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2122 * Use their space for storing the original skb length. 2123 */ 2124 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2125 2126 if (pskb_trim(skb, snaplen)) 2127 goto drop_n_acct; 2128 2129 skb_set_owner_r(skb, sk); 2130 skb->dev = NULL; 2131 skb_dst_drop(skb); 2132 2133 /* drop conntrack reference */ 2134 nf_reset_ct(skb); 2135 2136 spin_lock(&sk->sk_receive_queue.lock); 2137 po->stats.stats1.tp_packets++; 2138 sock_skb_set_dropcount(sk, skb); 2139 __skb_queue_tail(&sk->sk_receive_queue, skb); 2140 spin_unlock(&sk->sk_receive_queue.lock); 2141 sk->sk_data_ready(sk); 2142 return 0; 2143 2144 drop_n_acct: 2145 is_drop_n_account = true; 2146 atomic_inc(&po->tp_drops); 2147 atomic_inc(&sk->sk_drops); 2148 2149 drop_n_restore: 2150 if (skb_head != skb->data && skb_shared(skb)) { 2151 skb->data = skb_head; 2152 skb->len = skb_len; 2153 } 2154 drop: 2155 if (!is_drop_n_account) 2156 consume_skb(skb); 2157 else 2158 kfree_skb(skb); 2159 return 0; 2160 } 2161 2162 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2163 struct packet_type *pt, struct net_device *orig_dev) 2164 { 2165 struct sock *sk; 2166 struct packet_sock *po; 2167 struct sockaddr_ll *sll; 2168 union tpacket_uhdr h; 2169 u8 *skb_head = skb->data; 2170 int skb_len = skb->len; 2171 unsigned int snaplen, res; 2172 unsigned long status = TP_STATUS_USER; 2173 unsigned short macoff, netoff, hdrlen; 2174 struct sk_buff *copy_skb = NULL; 2175 struct timespec64 ts; 2176 __u32 ts_status; 2177 bool is_drop_n_account = false; 2178 unsigned int slot_id = 0; 2179 bool do_vnet = false; 2180 2181 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2182 * We may add members to them until current aligned size without forcing 2183 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
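 *
 * For illustration only (userspace side, not part of this file): a ring
 * consumer that does want the exact header length for the negotiated
 * version can obtain it as below, which is what PACKET_HDRLEN in
 * packet_getsockopt() further down implements:
 *
 *	int ver = TPACKET_V3, hdrlen = TPACKET_V3;
 *	socklen_t len = sizeof(hdrlen);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &len);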
2184 */ 2185 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2186 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2187 2188 if (skb->pkt_type == PACKET_LOOPBACK) 2189 goto drop; 2190 2191 sk = pt->af_packet_priv; 2192 po = pkt_sk(sk); 2193 2194 if (!net_eq(dev_net(dev), sock_net(sk))) 2195 goto drop; 2196 2197 if (dev->header_ops) { 2198 if (sk->sk_type != SOCK_DGRAM) 2199 skb_push(skb, skb->data - skb_mac_header(skb)); 2200 else if (skb->pkt_type == PACKET_OUTGOING) { 2201 /* Special case: outgoing packets have ll header at head */ 2202 skb_pull(skb, skb_network_offset(skb)); 2203 } 2204 } 2205 2206 snaplen = skb->len; 2207 2208 res = run_filter(skb, sk, snaplen); 2209 if (!res) 2210 goto drop_n_restore; 2211 2212 /* If we are flooded, just give up */ 2213 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { 2214 atomic_inc(&po->tp_drops); 2215 goto drop_n_restore; 2216 } 2217 2218 if (skb->ip_summed == CHECKSUM_PARTIAL) 2219 status |= TP_STATUS_CSUMNOTREADY; 2220 else if (skb->pkt_type != PACKET_OUTGOING && 2221 (skb->ip_summed == CHECKSUM_COMPLETE || 2222 skb_csum_unnecessary(skb))) 2223 status |= TP_STATUS_CSUM_VALID; 2224 2225 if (snaplen > res) 2226 snaplen = res; 2227 2228 if (sk->sk_type == SOCK_DGRAM) { 2229 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2230 po->tp_reserve; 2231 } else { 2232 unsigned int maclen = skb_network_offset(skb); 2233 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2234 (maclen < 16 ? 16 : maclen)) + 2235 po->tp_reserve; 2236 if (po->has_vnet_hdr) { 2237 netoff += sizeof(struct virtio_net_hdr); 2238 do_vnet = true; 2239 } 2240 macoff = netoff - maclen; 2241 } 2242 if (po->tp_version <= TPACKET_V2) { 2243 if (macoff + snaplen > po->rx_ring.frame_size) { 2244 if (po->copy_thresh && 2245 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2246 if (skb_shared(skb)) { 2247 copy_skb = skb_clone(skb, GFP_ATOMIC); 2248 } else { 2249 copy_skb = skb_get(skb); 2250 skb_head = skb->data; 2251 } 2252 if (copy_skb) 2253 skb_set_owner_r(copy_skb, sk); 2254 } 2255 snaplen = po->rx_ring.frame_size - macoff; 2256 if ((int)snaplen < 0) { 2257 snaplen = 0; 2258 do_vnet = false; 2259 } 2260 } 2261 } else if (unlikely(macoff + snaplen > 2262 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2263 u32 nval; 2264 2265 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2266 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 2267 snaplen, nval, macoff); 2268 snaplen = nval; 2269 if (unlikely((int)snaplen < 0)) { 2270 snaplen = 0; 2271 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2272 do_vnet = false; 2273 } 2274 } 2275 spin_lock(&sk->sk_receive_queue.lock); 2276 h.raw = packet_current_rx_frame(po, skb, 2277 TP_STATUS_KERNEL, (macoff+snaplen)); 2278 if (!h.raw) 2279 goto drop_n_account; 2280 2281 if (po->tp_version <= TPACKET_V2) { 2282 slot_id = po->rx_ring.head; 2283 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) 2284 goto drop_n_account; 2285 __set_bit(slot_id, po->rx_ring.rx_owner_map); 2286 } 2287 2288 if (do_vnet && 2289 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2290 sizeof(struct virtio_net_hdr), 2291 vio_le(), true, 0)) { 2292 if (po->tp_version == TPACKET_V3) 2293 prb_clear_blk_fill_status(&po->rx_ring); 2294 goto drop_n_account; 2295 } 2296 2297 if (po->tp_version <= TPACKET_V2) { 2298 packet_increment_rx_head(po, &po->rx_ring); 2299 /* 2300 * LOSING will be reported till you read the stats, 2301 * because it's COR - Clear On Read. 
2302 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2303 * at packet level. 2304 */ 2305 if (atomic_read(&po->tp_drops)) 2306 status |= TP_STATUS_LOSING; 2307 } 2308 2309 po->stats.stats1.tp_packets++; 2310 if (copy_skb) { 2311 status |= TP_STATUS_COPY; 2312 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2313 } 2314 spin_unlock(&sk->sk_receive_queue.lock); 2315 2316 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2317 2318 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 2319 ktime_get_real_ts64(&ts); 2320 2321 status |= ts_status; 2322 2323 switch (po->tp_version) { 2324 case TPACKET_V1: 2325 h.h1->tp_len = skb->len; 2326 h.h1->tp_snaplen = snaplen; 2327 h.h1->tp_mac = macoff; 2328 h.h1->tp_net = netoff; 2329 h.h1->tp_sec = ts.tv_sec; 2330 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2331 hdrlen = sizeof(*h.h1); 2332 break; 2333 case TPACKET_V2: 2334 h.h2->tp_len = skb->len; 2335 h.h2->tp_snaplen = snaplen; 2336 h.h2->tp_mac = macoff; 2337 h.h2->tp_net = netoff; 2338 h.h2->tp_sec = ts.tv_sec; 2339 h.h2->tp_nsec = ts.tv_nsec; 2340 if (skb_vlan_tag_present(skb)) { 2341 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2342 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2343 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2344 } else { 2345 h.h2->tp_vlan_tci = 0; 2346 h.h2->tp_vlan_tpid = 0; 2347 } 2348 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2349 hdrlen = sizeof(*h.h2); 2350 break; 2351 case TPACKET_V3: 2352 /* tp_nxt_offset,vlan are already populated above. 2353 * So DONT clear those fields here 2354 */ 2355 h.h3->tp_status |= status; 2356 h.h3->tp_len = skb->len; 2357 h.h3->tp_snaplen = snaplen; 2358 h.h3->tp_mac = macoff; 2359 h.h3->tp_net = netoff; 2360 h.h3->tp_sec = ts.tv_sec; 2361 h.h3->tp_nsec = ts.tv_nsec; 2362 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2363 hdrlen = sizeof(*h.h3); 2364 break; 2365 default: 2366 BUG(); 2367 } 2368 2369 sll = h.raw + TPACKET_ALIGN(hdrlen); 2370 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2371 sll->sll_family = AF_PACKET; 2372 sll->sll_hatype = dev->type; 2373 sll->sll_protocol = skb->protocol; 2374 sll->sll_pkttype = skb->pkt_type; 2375 if (unlikely(po->origdev)) 2376 sll->sll_ifindex = orig_dev->ifindex; 2377 else 2378 sll->sll_ifindex = dev->ifindex; 2379 2380 smp_mb(); 2381 2382 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2383 if (po->tp_version <= TPACKET_V2) { 2384 u8 *start, *end; 2385 2386 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2387 macoff + snaplen); 2388 2389 for (start = h.raw; start < end; start += PAGE_SIZE) 2390 flush_dcache_page(pgv_to_page(start)); 2391 } 2392 smp_wmb(); 2393 #endif 2394 2395 if (po->tp_version <= TPACKET_V2) { 2396 spin_lock(&sk->sk_receive_queue.lock); 2397 __packet_set_status(po, h.raw, status); 2398 __clear_bit(slot_id, po->rx_ring.rx_owner_map); 2399 spin_unlock(&sk->sk_receive_queue.lock); 2400 sk->sk_data_ready(sk); 2401 } else if (po->tp_version == TPACKET_V3) { 2402 prb_clear_blk_fill_status(&po->rx_ring); 2403 } 2404 2405 drop_n_restore: 2406 if (skb_head != skb->data && skb_shared(skb)) { 2407 skb->data = skb_head; 2408 skb->len = skb_len; 2409 } 2410 drop: 2411 if (!is_drop_n_account) 2412 consume_skb(skb); 2413 else 2414 kfree_skb(skb); 2415 return 0; 2416 2417 drop_n_account: 2418 spin_unlock(&sk->sk_receive_queue.lock); 2419 atomic_inc(&po->tp_drops); 2420 is_drop_n_account = true; 2421 2422 sk->sk_data_ready(sk); 2423 kfree_skb(copy_skb); 2424 goto drop_n_restore; 2425 } 2426 2427 static void tpacket_destruct_skb(struct 
sk_buff *skb) 2428 { 2429 struct packet_sock *po = pkt_sk(skb->sk); 2430 2431 if (likely(po->tx_ring.pg_vec)) { 2432 void *ph; 2433 __u32 ts; 2434 2435 ph = skb_zcopy_get_nouarg(skb); 2436 packet_dec_pending(&po->tx_ring); 2437 2438 ts = __packet_set_timestamp(po, ph, skb); 2439 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2440 2441 if (!packet_read_pending(&po->tx_ring)) 2442 complete(&po->skb_completion); 2443 } 2444 2445 sock_wfree(skb); 2446 } 2447 2448 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2449 { 2450 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2451 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2452 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2453 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2454 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2455 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2456 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2457 2458 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2459 return -EINVAL; 2460 2461 return 0; 2462 } 2463 2464 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2465 struct virtio_net_hdr *vnet_hdr) 2466 { 2467 if (*len < sizeof(*vnet_hdr)) 2468 return -EINVAL; 2469 *len -= sizeof(*vnet_hdr); 2470 2471 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2472 return -EFAULT; 2473 2474 return __packet_snd_vnet_parse(vnet_hdr, *len); 2475 } 2476 2477 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2478 void *frame, struct net_device *dev, void *data, int tp_len, 2479 __be16 proto, unsigned char *addr, int hlen, int copylen, 2480 const struct sockcm_cookie *sockc) 2481 { 2482 union tpacket_uhdr ph; 2483 int to_write, offset, len, nr_frags, len_max; 2484 struct socket *sock = po->sk.sk_socket; 2485 struct page *page; 2486 int err; 2487 2488 ph.raw = frame; 2489 2490 skb->protocol = proto; 2491 skb->dev = dev; 2492 skb->priority = po->sk.sk_priority; 2493 skb->mark = po->sk.sk_mark; 2494 skb->tstamp = sockc->transmit_time; 2495 skb_setup_tx_timestamp(skb, sockc->tsflags); 2496 skb_zcopy_set_nouarg(skb, ph.raw); 2497 2498 skb_reserve(skb, hlen); 2499 skb_reset_network_header(skb); 2500 2501 to_write = tp_len; 2502 2503 if (sock->type == SOCK_DGRAM) { 2504 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2505 NULL, tp_len); 2506 if (unlikely(err < 0)) 2507 return -EINVAL; 2508 } else if (copylen) { 2509 int hdrlen = min_t(int, copylen, tp_len); 2510 2511 skb_push(skb, dev->hard_header_len); 2512 skb_put(skb, copylen - dev->hard_header_len); 2513 err = skb_store_bits(skb, 0, data, hdrlen); 2514 if (unlikely(err)) 2515 return err; 2516 if (!dev_validate_header(dev, skb->data, hdrlen)) 2517 return -EINVAL; 2518 2519 data += hdrlen; 2520 to_write -= hdrlen; 2521 } 2522 2523 offset = offset_in_page(data); 2524 len_max = PAGE_SIZE - offset; 2525 len = ((to_write > len_max) ? 
len_max : to_write); 2526 2527 skb->data_len = to_write; 2528 skb->len += to_write; 2529 skb->truesize += to_write; 2530 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2531 2532 while (likely(to_write)) { 2533 nr_frags = skb_shinfo(skb)->nr_frags; 2534 2535 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2536 pr_err("Packet exceed the number of skb frags(%lu)\n", 2537 MAX_SKB_FRAGS); 2538 return -EFAULT; 2539 } 2540 2541 page = pgv_to_page(data); 2542 data += len; 2543 flush_dcache_page(page); 2544 get_page(page); 2545 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2546 to_write -= len; 2547 offset = 0; 2548 len_max = PAGE_SIZE; 2549 len = ((to_write > len_max) ? len_max : to_write); 2550 } 2551 2552 packet_parse_headers(skb, sock); 2553 2554 return tp_len; 2555 } 2556 2557 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2558 int size_max, void **data) 2559 { 2560 union tpacket_uhdr ph; 2561 int tp_len, off; 2562 2563 ph.raw = frame; 2564 2565 switch (po->tp_version) { 2566 case TPACKET_V3: 2567 if (ph.h3->tp_next_offset != 0) { 2568 pr_warn_once("variable sized slot not supported"); 2569 return -EINVAL; 2570 } 2571 tp_len = ph.h3->tp_len; 2572 break; 2573 case TPACKET_V2: 2574 tp_len = ph.h2->tp_len; 2575 break; 2576 default: 2577 tp_len = ph.h1->tp_len; 2578 break; 2579 } 2580 if (unlikely(tp_len > size_max)) { 2581 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2582 return -EMSGSIZE; 2583 } 2584 2585 if (unlikely(po->tp_tx_has_off)) { 2586 int off_min, off_max; 2587 2588 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2589 off_max = po->tx_ring.frame_size - tp_len; 2590 if (po->sk.sk_type == SOCK_DGRAM) { 2591 switch (po->tp_version) { 2592 case TPACKET_V3: 2593 off = ph.h3->tp_net; 2594 break; 2595 case TPACKET_V2: 2596 off = ph.h2->tp_net; 2597 break; 2598 default: 2599 off = ph.h1->tp_net; 2600 break; 2601 } 2602 } else { 2603 switch (po->tp_version) { 2604 case TPACKET_V3: 2605 off = ph.h3->tp_mac; 2606 break; 2607 case TPACKET_V2: 2608 off = ph.h2->tp_mac; 2609 break; 2610 default: 2611 off = ph.h1->tp_mac; 2612 break; 2613 } 2614 } 2615 if (unlikely((off < off_min) || (off_max < off))) 2616 return -EINVAL; 2617 } else { 2618 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2619 } 2620 2621 *data = frame + off; 2622 return tp_len; 2623 } 2624 2625 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2626 { 2627 struct sk_buff *skb = NULL; 2628 struct net_device *dev; 2629 struct virtio_net_hdr *vnet_hdr = NULL; 2630 struct sockcm_cookie sockc; 2631 __be16 proto; 2632 int err, reserve = 0; 2633 void *ph; 2634 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2635 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2636 unsigned char *addr = NULL; 2637 int tp_len, size_max; 2638 void *data; 2639 int len_sum = 0; 2640 int status = TP_STATUS_AVAILABLE; 2641 int hlen, tlen, copylen = 0; 2642 long timeo = 0; 2643 2644 mutex_lock(&po->pg_vec_lock); 2645 2646 /* packet_sendmsg() check on tx_ring.pg_vec was lockless, 2647 * we need to confirm it under protection of pg_vec_lock. 
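 * (packet_set_ring() may have torn the ring down between that unlocked
 * read and this point, hence the -EBUSY below rather than assuming the
 * ring still exists.)
 *
 * For orientation only, the userspace half of this path is roughly the
 * sketch below (illustrative, not part of this file; assumes a mapped
 * PACKET_TX_RING using TPACKET_V2 headers; ring, slot, req, frame and
 * frame_len are hypothetical variables):
 *
 *	struct tpacket2_hdr *hdr =
 *		(void *)(ring + slot * req.tp_frame_size);
 *
 *	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *	       frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);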
2648 */ 2649 if (unlikely(!po->tx_ring.pg_vec)) { 2650 err = -EBUSY; 2651 goto out; 2652 } 2653 if (likely(saddr == NULL)) { 2654 dev = packet_cached_dev_get(po); 2655 proto = po->num; 2656 } else { 2657 err = -EINVAL; 2658 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2659 goto out; 2660 if (msg->msg_namelen < (saddr->sll_halen 2661 + offsetof(struct sockaddr_ll, 2662 sll_addr))) 2663 goto out; 2664 proto = saddr->sll_protocol; 2665 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2666 if (po->sk.sk_socket->type == SOCK_DGRAM) { 2667 if (dev && msg->msg_namelen < dev->addr_len + 2668 offsetof(struct sockaddr_ll, sll_addr)) 2669 goto out_put; 2670 addr = saddr->sll_addr; 2671 } 2672 } 2673 2674 err = -ENXIO; 2675 if (unlikely(dev == NULL)) 2676 goto out; 2677 err = -ENETDOWN; 2678 if (unlikely(!(dev->flags & IFF_UP))) 2679 goto out_put; 2680 2681 sockcm_init(&sockc, &po->sk); 2682 if (msg->msg_controllen) { 2683 err = sock_cmsg_send(&po->sk, msg, &sockc); 2684 if (unlikely(err)) 2685 goto out_put; 2686 } 2687 2688 if (po->sk.sk_socket->type == SOCK_RAW) 2689 reserve = dev->hard_header_len; 2690 size_max = po->tx_ring.frame_size 2691 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2692 2693 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2694 size_max = dev->mtu + reserve + VLAN_HLEN; 2695 2696 reinit_completion(&po->skb_completion); 2697 2698 do { 2699 ph = packet_current_frame(po, &po->tx_ring, 2700 TP_STATUS_SEND_REQUEST); 2701 if (unlikely(ph == NULL)) { 2702 if (need_wait && skb) { 2703 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); 2704 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); 2705 if (timeo <= 0) { 2706 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; 2707 goto out_put; 2708 } 2709 } 2710 /* check for additional frames */ 2711 continue; 2712 } 2713 2714 skb = NULL; 2715 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2716 if (tp_len < 0) 2717 goto tpacket_error; 2718 2719 status = TP_STATUS_SEND_REQUEST; 2720 hlen = LL_RESERVED_SPACE(dev); 2721 tlen = dev->needed_tailroom; 2722 if (po->has_vnet_hdr) { 2723 vnet_hdr = data; 2724 data += sizeof(*vnet_hdr); 2725 tp_len -= sizeof(*vnet_hdr); 2726 if (tp_len < 0 || 2727 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2728 tp_len = -EINVAL; 2729 goto tpacket_error; 2730 } 2731 copylen = __virtio16_to_cpu(vio_le(), 2732 vnet_hdr->hdr_len); 2733 } 2734 copylen = max_t(int, copylen, dev->hard_header_len); 2735 skb = sock_alloc_send_skb(&po->sk, 2736 hlen + tlen + sizeof(struct sockaddr_ll) + 2737 (copylen - dev->hard_header_len), 2738 !need_wait, &err); 2739 2740 if (unlikely(skb == NULL)) { 2741 /* we assume the socket was initially writeable ... 
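 * so a failed allocation here means we ran out of socket write space or
 * memory rather than that the request was malformed.  If earlier frames
 * in this call were already handed to the driver, report the bytes
 * written so far instead of the allocation error; the error will
 * surface on a later send() if it persists.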
*/ 2742 if (likely(len_sum > 0)) 2743 err = len_sum; 2744 goto out_status; 2745 } 2746 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2747 addr, hlen, copylen, &sockc); 2748 if (likely(tp_len >= 0) && 2749 tp_len > dev->mtu + reserve && 2750 !po->has_vnet_hdr && 2751 !packet_extra_vlan_len_allowed(dev, skb)) 2752 tp_len = -EMSGSIZE; 2753 2754 if (unlikely(tp_len < 0)) { 2755 tpacket_error: 2756 if (po->tp_loss) { 2757 __packet_set_status(po, ph, 2758 TP_STATUS_AVAILABLE); 2759 packet_increment_head(&po->tx_ring); 2760 kfree_skb(skb); 2761 continue; 2762 } else { 2763 status = TP_STATUS_WRONG_FORMAT; 2764 err = tp_len; 2765 goto out_status; 2766 } 2767 } 2768 2769 if (po->has_vnet_hdr) { 2770 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2771 tp_len = -EINVAL; 2772 goto tpacket_error; 2773 } 2774 virtio_net_hdr_set_proto(skb, vnet_hdr); 2775 } 2776 2777 skb->destructor = tpacket_destruct_skb; 2778 __packet_set_status(po, ph, TP_STATUS_SENDING); 2779 packet_inc_pending(&po->tx_ring); 2780 2781 status = TP_STATUS_SEND_REQUEST; 2782 err = po->xmit(skb); 2783 if (unlikely(err > 0)) { 2784 err = net_xmit_errno(err); 2785 if (err && __packet_get_status(po, ph) == 2786 TP_STATUS_AVAILABLE) { 2787 /* skb was destructed already */ 2788 skb = NULL; 2789 goto out_status; 2790 } 2791 /* 2792 * skb was dropped but not destructed yet; 2793 * let's treat it like congestion or err < 0 2794 */ 2795 err = 0; 2796 } 2797 packet_increment_head(&po->tx_ring); 2798 len_sum += tp_len; 2799 } while (likely((ph != NULL) || 2800 /* Note: packet_read_pending() might be slow if we have 2801 * to call it as it's per_cpu variable, but in fast-path 2802 * we already short-circuit the loop with the first 2803 * condition, and luckily don't have to go that path 2804 * anyway. 2805 */ 2806 (need_wait && packet_read_pending(&po->tx_ring)))); 2807 2808 err = len_sum; 2809 goto out_put; 2810 2811 out_status: 2812 __packet_set_status(po, ph, status); 2813 kfree_skb(skb); 2814 out_put: 2815 dev_put(dev); 2816 out: 2817 mutex_unlock(&po->pg_vec_lock); 2818 return err; 2819 } 2820 2821 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2822 size_t reserve, size_t len, 2823 size_t linear, int noblock, 2824 int *err) 2825 { 2826 struct sk_buff *skb; 2827 2828 /* Under a page? Don't bother with paged skb. */ 2829 if (prepad + len < PAGE_SIZE || !linear) 2830 linear = len; 2831 2832 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2833 err, 0); 2834 if (!skb) 2835 return NULL; 2836 2837 skb_reserve(skb, reserve); 2838 skb_put(skb, linear); 2839 skb->data_len = len - linear; 2840 skb->len += len - linear; 2841 2842 return skb; 2843 } 2844 2845 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2846 { 2847 struct sock *sk = sock->sk; 2848 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2849 struct sk_buff *skb; 2850 struct net_device *dev; 2851 __be16 proto; 2852 unsigned char *addr = NULL; 2853 int err, reserve = 0; 2854 struct sockcm_cookie sockc; 2855 struct virtio_net_hdr vnet_hdr = { 0 }; 2856 int offset = 0; 2857 struct packet_sock *po = pkt_sk(sk); 2858 bool has_vnet_hdr = false; 2859 int hlen, tlen, linear; 2860 int extra_len = 0; 2861 2862 /* 2863 * Get and verify the address. 
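 *
 * An unnamed send (msg_name == NULL) reuses the device and protocol the
 * socket is bound to; otherwise the caller names them in a struct
 * sockaddr_ll.  Illustrative userspace call for a SOCK_DGRAM packet
 * socket, where the kernel builds the link-layer header from sll_addr
 * (sketch only, not part of this file; "eth0", dst_mac, payload and
 * payload_len are example values):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *	};
 *
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));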
2864 */ 2865 2866 if (likely(saddr == NULL)) { 2867 dev = packet_cached_dev_get(po); 2868 proto = po->num; 2869 } else { 2870 err = -EINVAL; 2871 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2872 goto out; 2873 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2874 goto out; 2875 proto = saddr->sll_protocol; 2876 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2877 if (sock->type == SOCK_DGRAM) { 2878 if (dev && msg->msg_namelen < dev->addr_len + 2879 offsetof(struct sockaddr_ll, sll_addr)) 2880 goto out_unlock; 2881 addr = saddr->sll_addr; 2882 } 2883 } 2884 2885 err = -ENXIO; 2886 if (unlikely(dev == NULL)) 2887 goto out_unlock; 2888 err = -ENETDOWN; 2889 if (unlikely(!(dev->flags & IFF_UP))) 2890 goto out_unlock; 2891 2892 sockcm_init(&sockc, sk); 2893 sockc.mark = sk->sk_mark; 2894 if (msg->msg_controllen) { 2895 err = sock_cmsg_send(sk, msg, &sockc); 2896 if (unlikely(err)) 2897 goto out_unlock; 2898 } 2899 2900 if (sock->type == SOCK_RAW) 2901 reserve = dev->hard_header_len; 2902 if (po->has_vnet_hdr) { 2903 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2904 if (err) 2905 goto out_unlock; 2906 has_vnet_hdr = true; 2907 } 2908 2909 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2910 if (!netif_supports_nofcs(dev)) { 2911 err = -EPROTONOSUPPORT; 2912 goto out_unlock; 2913 } 2914 extra_len = 4; /* We're doing our own CRC */ 2915 } 2916 2917 err = -EMSGSIZE; 2918 if (!vnet_hdr.gso_type && 2919 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2920 goto out_unlock; 2921 2922 err = -ENOBUFS; 2923 hlen = LL_RESERVED_SPACE(dev); 2924 tlen = dev->needed_tailroom; 2925 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2926 linear = max(linear, min_t(int, len, dev->hard_header_len)); 2927 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2928 msg->msg_flags & MSG_DONTWAIT, &err); 2929 if (skb == NULL) 2930 goto out_unlock; 2931 2932 skb_reset_network_header(skb); 2933 2934 err = -EINVAL; 2935 if (sock->type == SOCK_DGRAM) { 2936 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2937 if (unlikely(offset < 0)) 2938 goto out_free; 2939 } else if (reserve) { 2940 skb_reserve(skb, -reserve); 2941 if (len < reserve + sizeof(struct ipv6hdr) && 2942 dev->min_header_len != dev->hard_header_len) 2943 skb_reset_network_header(skb); 2944 } 2945 2946 /* Returns -EFAULT on error */ 2947 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 2948 if (err) 2949 goto out_free; 2950 2951 if (sock->type == SOCK_RAW && 2952 !dev_validate_header(dev, skb->data, len)) { 2953 err = -EINVAL; 2954 goto out_free; 2955 } 2956 2957 skb_setup_tx_timestamp(skb, sockc.tsflags); 2958 2959 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 2960 !packet_extra_vlan_len_allowed(dev, skb)) { 2961 err = -EMSGSIZE; 2962 goto out_free; 2963 } 2964 2965 skb->protocol = proto; 2966 skb->dev = dev; 2967 skb->priority = sk->sk_priority; 2968 skb->mark = sockc.mark; 2969 skb->tstamp = sockc.transmit_time; 2970 2971 if (has_vnet_hdr) { 2972 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2973 if (err) 2974 goto out_free; 2975 len += sizeof(vnet_hdr); 2976 virtio_net_hdr_set_proto(skb, &vnet_hdr); 2977 } 2978 2979 packet_parse_headers(skb, sock); 2980 2981 if (unlikely(extra_len == 4)) 2982 skb->no_fcs = 1; 2983 2984 err = po->xmit(skb); 2985 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2986 goto out_unlock; 2987 2988 dev_put(dev); 2989 2990 return len; 2991 2992 out_free: 2993 kfree_skb(skb); 2994 out_unlock: 2995 
if (dev) 2996 dev_put(dev); 2997 out: 2998 return err; 2999 } 3000 3001 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 3002 { 3003 struct sock *sk = sock->sk; 3004 struct packet_sock *po = pkt_sk(sk); 3005 3006 if (po->tx_ring.pg_vec) 3007 return tpacket_snd(po, msg); 3008 else 3009 return packet_snd(sock, msg, len); 3010 } 3011 3012 /* 3013 * Close a PACKET socket. This is fairly simple. We immediately go 3014 * to 'closed' state and remove our protocol entry in the device list. 3015 */ 3016 3017 static int packet_release(struct socket *sock) 3018 { 3019 struct sock *sk = sock->sk; 3020 struct packet_sock *po; 3021 struct packet_fanout *f; 3022 struct net *net; 3023 union tpacket_req_u req_u; 3024 3025 if (!sk) 3026 return 0; 3027 3028 net = sock_net(sk); 3029 po = pkt_sk(sk); 3030 3031 mutex_lock(&net->packet.sklist_lock); 3032 sk_del_node_init_rcu(sk); 3033 mutex_unlock(&net->packet.sklist_lock); 3034 3035 preempt_disable(); 3036 sock_prot_inuse_add(net, sk->sk_prot, -1); 3037 preempt_enable(); 3038 3039 spin_lock(&po->bind_lock); 3040 unregister_prot_hook(sk, false); 3041 packet_cached_dev_reset(po); 3042 3043 if (po->prot_hook.dev) { 3044 dev_put(po->prot_hook.dev); 3045 po->prot_hook.dev = NULL; 3046 } 3047 spin_unlock(&po->bind_lock); 3048 3049 packet_flush_mclist(sk); 3050 3051 lock_sock(sk); 3052 if (po->rx_ring.pg_vec) { 3053 memset(&req_u, 0, sizeof(req_u)); 3054 packet_set_ring(sk, &req_u, 1, 0); 3055 } 3056 3057 if (po->tx_ring.pg_vec) { 3058 memset(&req_u, 0, sizeof(req_u)); 3059 packet_set_ring(sk, &req_u, 1, 1); 3060 } 3061 release_sock(sk); 3062 3063 f = fanout_release(sk); 3064 3065 synchronize_net(); 3066 3067 kfree(po->rollover); 3068 if (f) { 3069 fanout_release_data(f); 3070 kfree(f); 3071 } 3072 /* 3073 * Now the socket is dead. No more input will appear. 3074 */ 3075 sock_orphan(sk); 3076 sock->sk = NULL; 3077 3078 /* Purge queues */ 3079 3080 skb_queue_purge(&sk->sk_receive_queue); 3081 packet_free_pending(po); 3082 sk_refcnt_debug_release(sk); 3083 3084 sock_put(sk); 3085 return 0; 3086 } 3087 3088 /* 3089 * Attach a packet hook. 
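 *
 * Both bind() flavours below funnel into packet_do_bind(): it swaps the
 * socket's protocol/device pair, unregistering the old prot_hook first
 * so that no packet can match a half-updated binding, and re-registers
 * the hook only if a protocol is set and the new device (if any) is
 * still present and up; otherwise the socket is flagged with ENETDOWN.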
3090 */ 3091 3092 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3093 __be16 proto) 3094 { 3095 struct packet_sock *po = pkt_sk(sk); 3096 struct net_device *dev_curr; 3097 __be16 proto_curr; 3098 bool need_rehook; 3099 struct net_device *dev = NULL; 3100 int ret = 0; 3101 bool unlisted = false; 3102 3103 lock_sock(sk); 3104 spin_lock(&po->bind_lock); 3105 rcu_read_lock(); 3106 3107 if (po->fanout) { 3108 ret = -EINVAL; 3109 goto out_unlock; 3110 } 3111 3112 if (name) { 3113 dev = dev_get_by_name_rcu(sock_net(sk), name); 3114 if (!dev) { 3115 ret = -ENODEV; 3116 goto out_unlock; 3117 } 3118 } else if (ifindex) { 3119 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3120 if (!dev) { 3121 ret = -ENODEV; 3122 goto out_unlock; 3123 } 3124 } 3125 3126 if (dev) 3127 dev_hold(dev); 3128 3129 proto_curr = po->prot_hook.type; 3130 dev_curr = po->prot_hook.dev; 3131 3132 need_rehook = proto_curr != proto || dev_curr != dev; 3133 3134 if (need_rehook) { 3135 if (po->running) { 3136 rcu_read_unlock(); 3137 /* prevents packet_notifier() from calling 3138 * register_prot_hook() 3139 */ 3140 po->num = 0; 3141 __unregister_prot_hook(sk, true); 3142 rcu_read_lock(); 3143 dev_curr = po->prot_hook.dev; 3144 if (dev) 3145 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3146 dev->ifindex); 3147 } 3148 3149 BUG_ON(po->running); 3150 po->num = proto; 3151 po->prot_hook.type = proto; 3152 3153 if (unlikely(unlisted)) { 3154 dev_put(dev); 3155 po->prot_hook.dev = NULL; 3156 po->ifindex = -1; 3157 packet_cached_dev_reset(po); 3158 } else { 3159 po->prot_hook.dev = dev; 3160 po->ifindex = dev ? dev->ifindex : 0; 3161 packet_cached_dev_assign(po, dev); 3162 } 3163 } 3164 if (dev_curr) 3165 dev_put(dev_curr); 3166 3167 if (proto == 0 || !need_rehook) 3168 goto out_unlock; 3169 3170 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3171 register_prot_hook(sk); 3172 } else { 3173 sk->sk_err = ENETDOWN; 3174 if (!sock_flag(sk, SOCK_DEAD)) 3175 sk->sk_error_report(sk); 3176 } 3177 3178 out_unlock: 3179 rcu_read_unlock(); 3180 spin_unlock(&po->bind_lock); 3181 release_sock(sk); 3182 return ret; 3183 } 3184 3185 /* 3186 * Bind a packet socket to a device 3187 */ 3188 3189 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3190 int addr_len) 3191 { 3192 struct sock *sk = sock->sk; 3193 char name[sizeof(uaddr->sa_data) + 1]; 3194 3195 /* 3196 * Check legality 3197 */ 3198 3199 if (addr_len != sizeof(struct sockaddr)) 3200 return -EINVAL; 3201 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3202 * zero-terminated. 3203 */ 3204 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); 3205 name[sizeof(uaddr->sa_data)] = 0; 3206 3207 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3208 } 3209 3210 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3211 { 3212 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3213 struct sock *sk = sock->sk; 3214 3215 /* 3216 * Check legality 3217 */ 3218 3219 if (addr_len < sizeof(struct sockaddr_ll)) 3220 return -EINVAL; 3221 if (sll->sll_family != AF_PACKET) 3222 return -EINVAL; 3223 3224 return packet_do_bind(sk, NULL, sll->sll_ifindex, 3225 sll->sll_protocol ? : pkt_sk(sk)->num); 3226 } 3227 3228 static struct proto packet_proto = { 3229 .name = "PACKET", 3230 .owner = THIS_MODULE, 3231 .obj_size = sizeof(struct packet_sock), 3232 }; 3233 3234 /* 3235 * Create a packet of type SOCK_PACKET. 
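 * (and, despite the wording above, SOCK_RAW and SOCK_DGRAM packet
 * sockets as well).  Illustrative userspace calls, all of which require
 * CAP_NET_RAW in the socket's user namespace (sketch only, not part of
 * this file):
 *
 *	fd = socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL));   full frames
 *	fd = socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));    payload only
 *	fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));   legacy API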
3236 */ 3237 3238 static int packet_create(struct net *net, struct socket *sock, int protocol, 3239 int kern) 3240 { 3241 struct sock *sk; 3242 struct packet_sock *po; 3243 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3244 int err; 3245 3246 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3247 return -EPERM; 3248 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3249 sock->type != SOCK_PACKET) 3250 return -ESOCKTNOSUPPORT; 3251 3252 sock->state = SS_UNCONNECTED; 3253 3254 err = -ENOBUFS; 3255 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3256 if (sk == NULL) 3257 goto out; 3258 3259 sock->ops = &packet_ops; 3260 if (sock->type == SOCK_PACKET) 3261 sock->ops = &packet_ops_spkt; 3262 3263 sock_init_data(sock, sk); 3264 3265 po = pkt_sk(sk); 3266 init_completion(&po->skb_completion); 3267 sk->sk_family = PF_PACKET; 3268 po->num = proto; 3269 po->xmit = dev_queue_xmit; 3270 3271 err = packet_alloc_pending(po); 3272 if (err) 3273 goto out2; 3274 3275 packet_cached_dev_reset(po); 3276 3277 sk->sk_destruct = packet_sock_destruct; 3278 sk_refcnt_debug_inc(sk); 3279 3280 /* 3281 * Attach a protocol block 3282 */ 3283 3284 spin_lock_init(&po->bind_lock); 3285 mutex_init(&po->pg_vec_lock); 3286 po->rollover = NULL; 3287 po->prot_hook.func = packet_rcv; 3288 3289 if (sock->type == SOCK_PACKET) 3290 po->prot_hook.func = packet_rcv_spkt; 3291 3292 po->prot_hook.af_packet_priv = sk; 3293 3294 if (proto) { 3295 po->prot_hook.type = proto; 3296 __register_prot_hook(sk); 3297 } 3298 3299 mutex_lock(&net->packet.sklist_lock); 3300 sk_add_node_tail_rcu(sk, &net->packet.sklist); 3301 mutex_unlock(&net->packet.sklist_lock); 3302 3303 preempt_disable(); 3304 sock_prot_inuse_add(net, &packet_proto, 1); 3305 preempt_enable(); 3306 3307 return 0; 3308 out2: 3309 sk_free(sk); 3310 out: 3311 return err; 3312 } 3313 3314 /* 3315 * Pull a packet from our receive queue and hand it to the user. 3316 * If necessary we block. 3317 */ 3318 3319 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3320 int flags) 3321 { 3322 struct sock *sk = sock->sk; 3323 struct sk_buff *skb; 3324 int copied, err; 3325 int vnet_hdr_len = 0; 3326 unsigned int origlen = 0; 3327 3328 err = -EINVAL; 3329 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3330 goto out; 3331 3332 #if 0 3333 /* What error should we return now? EUNATTACH? */ 3334 if (pkt_sk(sk)->ifindex < 0) 3335 return -ENODEV; 3336 #endif 3337 3338 if (flags & MSG_ERRQUEUE) { 3339 err = sock_recv_errqueue(sk, msg, len, 3340 SOL_PACKET, PACKET_TX_TIMESTAMP); 3341 goto out; 3342 } 3343 3344 /* 3345 * Call the generic datagram receiver. This handles all sorts 3346 * of horrible races and re-entrancy so we can forget about it 3347 * in the protocol layers. 3348 * 3349 * Now it will return ENETDOWN, if device have just gone down, 3350 * but then it will block. 3351 */ 3352 3353 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3354 3355 /* 3356 * An error occurred so return it. Because skb_recv_datagram() 3357 * handles the blocking we don't see and worry about blocking 3358 * retries. 3359 */ 3360 3361 if (skb == NULL) 3362 goto out; 3363 3364 packet_rcv_try_clear_pressure(pkt_sk(sk)); 3365 3366 if (pkt_sk(sk)->has_vnet_hdr) { 3367 err = packet_rcv_vnet(msg, skb, &len); 3368 if (err) 3369 goto out_free; 3370 vnet_hdr_len = sizeof(struct virtio_net_hdr); 3371 } 3372 3373 /* You lose any data beyond the buffer you gave. 
If it worries 3374 * a user program they can ask the device for its MTU 3375 * anyway. 3376 */ 3377 copied = skb->len; 3378 if (copied > len) { 3379 copied = len; 3380 msg->msg_flags |= MSG_TRUNC; 3381 } 3382 3383 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3384 if (err) 3385 goto out_free; 3386 3387 if (sock->type != SOCK_PACKET) { 3388 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3389 3390 /* Original length was stored in sockaddr_ll fields */ 3391 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3392 sll->sll_family = AF_PACKET; 3393 sll->sll_protocol = skb->protocol; 3394 } 3395 3396 sock_recv_ts_and_drops(msg, sk, skb); 3397 3398 if (msg->msg_name) { 3399 int copy_len; 3400 3401 /* If the address length field is there to be filled 3402 * in, we fill it in now. 3403 */ 3404 if (sock->type == SOCK_PACKET) { 3405 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3406 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3407 copy_len = msg->msg_namelen; 3408 } else { 3409 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3410 3411 msg->msg_namelen = sll->sll_halen + 3412 offsetof(struct sockaddr_ll, sll_addr); 3413 copy_len = msg->msg_namelen; 3414 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { 3415 memset(msg->msg_name + 3416 offsetof(struct sockaddr_ll, sll_addr), 3417 0, sizeof(sll->sll_addr)); 3418 msg->msg_namelen = sizeof(struct sockaddr_ll); 3419 } 3420 } 3421 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); 3422 } 3423 3424 if (pkt_sk(sk)->auxdata) { 3425 struct tpacket_auxdata aux; 3426 3427 aux.tp_status = TP_STATUS_USER; 3428 if (skb->ip_summed == CHECKSUM_PARTIAL) 3429 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3430 else if (skb->pkt_type != PACKET_OUTGOING && 3431 (skb->ip_summed == CHECKSUM_COMPLETE || 3432 skb_csum_unnecessary(skb))) 3433 aux.tp_status |= TP_STATUS_CSUM_VALID; 3434 3435 aux.tp_len = origlen; 3436 aux.tp_snaplen = skb->len; 3437 aux.tp_mac = 0; 3438 aux.tp_net = skb_network_offset(skb); 3439 if (skb_vlan_tag_present(skb)) { 3440 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3441 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3442 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3443 } else { 3444 aux.tp_vlan_tci = 0; 3445 aux.tp_vlan_tpid = 0; 3446 } 3447 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3448 } 3449 3450 /* 3451 * Free or return the buffer as appropriate. Again this 3452 * hides all the races and re-entrancy issues from us. 3453 */ 3454 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3455 3456 out_free: 3457 skb_free_datagram(sk, skb); 3458 out: 3459 return err; 3460 } 3461 3462 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3463 int peer) 3464 { 3465 struct net_device *dev; 3466 struct sock *sk = sock->sk; 3467 3468 if (peer) 3469 return -EOPNOTSUPP; 3470 3471 uaddr->sa_family = AF_PACKET; 3472 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3473 rcu_read_lock(); 3474 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3475 if (dev) 3476 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3477 rcu_read_unlock(); 3478 3479 return sizeof(*uaddr); 3480 } 3481 3482 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3483 int peer) 3484 { 3485 struct net_device *dev; 3486 struct sock *sk = sock->sk; 3487 struct packet_sock *po = pkt_sk(sk); 3488 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3489 3490 if (peer) 3491 return -EOPNOTSUPP; 3492 3493 sll->sll_family = AF_PACKET; 3494 sll->sll_ifindex = po->ifindex; 3495 sll->sll_protocol = po->num; 3496 sll->sll_pkttype = 0; 3497 rcu_read_lock(); 3498 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3499 if (dev) { 3500 sll->sll_hatype = dev->type; 3501 sll->sll_halen = dev->addr_len; 3502 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3503 } else { 3504 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3505 sll->sll_halen = 0; 3506 } 3507 rcu_read_unlock(); 3508 3509 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3510 } 3511 3512 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3513 int what) 3514 { 3515 switch (i->type) { 3516 case PACKET_MR_MULTICAST: 3517 if (i->alen != dev->addr_len) 3518 return -EINVAL; 3519 if (what > 0) 3520 return dev_mc_add(dev, i->addr); 3521 else 3522 return dev_mc_del(dev, i->addr); 3523 break; 3524 case PACKET_MR_PROMISC: 3525 return dev_set_promiscuity(dev, what); 3526 case PACKET_MR_ALLMULTI: 3527 return dev_set_allmulti(dev, what); 3528 case PACKET_MR_UNICAST: 3529 if (i->alen != dev->addr_len) 3530 return -EINVAL; 3531 if (what > 0) 3532 return dev_uc_add(dev, i->addr); 3533 else 3534 return dev_uc_del(dev, i->addr); 3535 break; 3536 default: 3537 break; 3538 } 3539 return 0; 3540 } 3541 3542 static void packet_dev_mclist_delete(struct net_device *dev, 3543 struct packet_mclist **mlp) 3544 { 3545 struct packet_mclist *ml; 3546 3547 while ((ml = *mlp) != NULL) { 3548 if (ml->ifindex == dev->ifindex) { 3549 packet_dev_mc(dev, ml, -1); 3550 *mlp = ml->next; 3551 kfree(ml); 3552 } else 3553 mlp = &ml->next; 3554 } 3555 } 3556 3557 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3558 { 3559 struct packet_sock *po = pkt_sk(sk); 3560 struct packet_mclist *ml, *i; 3561 struct net_device *dev; 3562 int err; 3563 3564 rtnl_lock(); 3565 3566 err = -ENODEV; 3567 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3568 if (!dev) 3569 goto done; 3570 3571 err = -EINVAL; 3572 if (mreq->mr_alen > dev->addr_len) 3573 goto done; 3574 3575 err = -ENOBUFS; 3576 i = kmalloc(sizeof(*i), GFP_KERNEL); 3577 if (i == NULL) 3578 goto done; 3579 3580 err = 0; 3581 for (ml = po->mclist; ml; ml = ml->next) { 3582 if (ml->ifindex == mreq->mr_ifindex && 3583 ml->type == mreq->mr_type && 3584 ml->alen == mreq->mr_alen && 3585 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3586 ml->count++; 3587 /* Free the new element ... 
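 * an identical membership already exists, so just bump its reference
 * count and discard the element allocated above; packet_mc_drop()
 * removes the hardware filter only once that count drops back to zero.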
*/ 3588 kfree(i); 3589 goto done; 3590 } 3591 } 3592 3593 i->type = mreq->mr_type; 3594 i->ifindex = mreq->mr_ifindex; 3595 i->alen = mreq->mr_alen; 3596 memcpy(i->addr, mreq->mr_address, i->alen); 3597 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3598 i->count = 1; 3599 i->next = po->mclist; 3600 po->mclist = i; 3601 err = packet_dev_mc(dev, i, 1); 3602 if (err) { 3603 po->mclist = i->next; 3604 kfree(i); 3605 } 3606 3607 done: 3608 rtnl_unlock(); 3609 return err; 3610 } 3611 3612 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3613 { 3614 struct packet_mclist *ml, **mlp; 3615 3616 rtnl_lock(); 3617 3618 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3619 if (ml->ifindex == mreq->mr_ifindex && 3620 ml->type == mreq->mr_type && 3621 ml->alen == mreq->mr_alen && 3622 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3623 if (--ml->count == 0) { 3624 struct net_device *dev; 3625 *mlp = ml->next; 3626 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3627 if (dev) 3628 packet_dev_mc(dev, ml, -1); 3629 kfree(ml); 3630 } 3631 break; 3632 } 3633 } 3634 rtnl_unlock(); 3635 return 0; 3636 } 3637 3638 static void packet_flush_mclist(struct sock *sk) 3639 { 3640 struct packet_sock *po = pkt_sk(sk); 3641 struct packet_mclist *ml; 3642 3643 if (!po->mclist) 3644 return; 3645 3646 rtnl_lock(); 3647 while ((ml = po->mclist) != NULL) { 3648 struct net_device *dev; 3649 3650 po->mclist = ml->next; 3651 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3652 if (dev != NULL) 3653 packet_dev_mc(dev, ml, -1); 3654 kfree(ml); 3655 } 3656 rtnl_unlock(); 3657 } 3658 3659 static int 3660 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, 3661 unsigned int optlen) 3662 { 3663 struct sock *sk = sock->sk; 3664 struct packet_sock *po = pkt_sk(sk); 3665 int ret; 3666 3667 if (level != SOL_PACKET) 3668 return -ENOPROTOOPT; 3669 3670 switch (optname) { 3671 case PACKET_ADD_MEMBERSHIP: 3672 case PACKET_DROP_MEMBERSHIP: 3673 { 3674 struct packet_mreq_max mreq; 3675 int len = optlen; 3676 memset(&mreq, 0, sizeof(mreq)); 3677 if (len < sizeof(struct packet_mreq)) 3678 return -EINVAL; 3679 if (len > sizeof(mreq)) 3680 len = sizeof(mreq); 3681 if (copy_from_sockptr(&mreq, optval, len)) 3682 return -EFAULT; 3683 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3684 return -EINVAL; 3685 if (optname == PACKET_ADD_MEMBERSHIP) 3686 ret = packet_mc_add(sk, &mreq); 3687 else 3688 ret = packet_mc_drop(sk, &mreq); 3689 return ret; 3690 } 3691 3692 case PACKET_RX_RING: 3693 case PACKET_TX_RING: 3694 { 3695 union tpacket_req_u req_u; 3696 int len; 3697 3698 lock_sock(sk); 3699 switch (po->tp_version) { 3700 case TPACKET_V1: 3701 case TPACKET_V2: 3702 len = sizeof(req_u.req); 3703 break; 3704 case TPACKET_V3: 3705 default: 3706 len = sizeof(req_u.req3); 3707 break; 3708 } 3709 if (optlen < len) { 3710 ret = -EINVAL; 3711 } else { 3712 if (copy_from_sockptr(&req_u.req, optval, len)) 3713 ret = -EFAULT; 3714 else 3715 ret = packet_set_ring(sk, &req_u, 0, 3716 optname == PACKET_TX_RING); 3717 } 3718 release_sock(sk); 3719 return ret; 3720 } 3721 case PACKET_COPY_THRESH: 3722 { 3723 int val; 3724 3725 if (optlen != sizeof(val)) 3726 return -EINVAL; 3727 if (copy_from_sockptr(&val, optval, sizeof(val))) 3728 return -EFAULT; 3729 3730 pkt_sk(sk)->copy_thresh = val; 3731 return 0; 3732 } 3733 case PACKET_VERSION: 3734 { 3735 int val; 3736 3737 if (optlen != sizeof(val)) 3738 return -EINVAL; 3739 if (copy_from_sockptr(&val, 
optval, sizeof(val))) 3740 return -EFAULT; 3741 switch (val) { 3742 case TPACKET_V1: 3743 case TPACKET_V2: 3744 case TPACKET_V3: 3745 break; 3746 default: 3747 return -EINVAL; 3748 } 3749 lock_sock(sk); 3750 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3751 ret = -EBUSY; 3752 } else { 3753 po->tp_version = val; 3754 ret = 0; 3755 } 3756 release_sock(sk); 3757 return ret; 3758 } 3759 case PACKET_RESERVE: 3760 { 3761 unsigned int val; 3762 3763 if (optlen != sizeof(val)) 3764 return -EINVAL; 3765 if (copy_from_sockptr(&val, optval, sizeof(val))) 3766 return -EFAULT; 3767 if (val > INT_MAX) 3768 return -EINVAL; 3769 lock_sock(sk); 3770 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3771 ret = -EBUSY; 3772 } else { 3773 po->tp_reserve = val; 3774 ret = 0; 3775 } 3776 release_sock(sk); 3777 return ret; 3778 } 3779 case PACKET_LOSS: 3780 { 3781 unsigned int val; 3782 3783 if (optlen != sizeof(val)) 3784 return -EINVAL; 3785 if (copy_from_sockptr(&val, optval, sizeof(val))) 3786 return -EFAULT; 3787 3788 lock_sock(sk); 3789 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3790 ret = -EBUSY; 3791 } else { 3792 po->tp_loss = !!val; 3793 ret = 0; 3794 } 3795 release_sock(sk); 3796 return ret; 3797 } 3798 case PACKET_AUXDATA: 3799 { 3800 int val; 3801 3802 if (optlen < sizeof(val)) 3803 return -EINVAL; 3804 if (copy_from_sockptr(&val, optval, sizeof(val))) 3805 return -EFAULT; 3806 3807 lock_sock(sk); 3808 po->auxdata = !!val; 3809 release_sock(sk); 3810 return 0; 3811 } 3812 case PACKET_ORIGDEV: 3813 { 3814 int val; 3815 3816 if (optlen < sizeof(val)) 3817 return -EINVAL; 3818 if (copy_from_sockptr(&val, optval, sizeof(val))) 3819 return -EFAULT; 3820 3821 lock_sock(sk); 3822 po->origdev = !!val; 3823 release_sock(sk); 3824 return 0; 3825 } 3826 case PACKET_VNET_HDR: 3827 { 3828 int val; 3829 3830 if (sock->type != SOCK_RAW) 3831 return -EINVAL; 3832 if (optlen < sizeof(val)) 3833 return -EINVAL; 3834 if (copy_from_sockptr(&val, optval, sizeof(val))) 3835 return -EFAULT; 3836 3837 lock_sock(sk); 3838 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3839 ret = -EBUSY; 3840 } else { 3841 po->has_vnet_hdr = !!val; 3842 ret = 0; 3843 } 3844 release_sock(sk); 3845 return ret; 3846 } 3847 case PACKET_TIMESTAMP: 3848 { 3849 int val; 3850 3851 if (optlen != sizeof(val)) 3852 return -EINVAL; 3853 if (copy_from_sockptr(&val, optval, sizeof(val))) 3854 return -EFAULT; 3855 3856 po->tp_tstamp = val; 3857 return 0; 3858 } 3859 case PACKET_FANOUT: 3860 { 3861 int val; 3862 3863 if (optlen != sizeof(val)) 3864 return -EINVAL; 3865 if (copy_from_sockptr(&val, optval, sizeof(val))) 3866 return -EFAULT; 3867 3868 return fanout_add(sk, val & 0xffff, val >> 16); 3869 } 3870 case PACKET_FANOUT_DATA: 3871 { 3872 if (!po->fanout) 3873 return -EINVAL; 3874 3875 return fanout_set_data(po, optval, optlen); 3876 } 3877 case PACKET_IGNORE_OUTGOING: 3878 { 3879 int val; 3880 3881 if (optlen != sizeof(val)) 3882 return -EINVAL; 3883 if (copy_from_sockptr(&val, optval, sizeof(val))) 3884 return -EFAULT; 3885 if (val < 0 || val > 1) 3886 return -EINVAL; 3887 3888 po->prot_hook.ignore_outgoing = !!val; 3889 return 0; 3890 } 3891 case PACKET_TX_HAS_OFF: 3892 { 3893 unsigned int val; 3894 3895 if (optlen != sizeof(val)) 3896 return -EINVAL; 3897 if (copy_from_sockptr(&val, optval, sizeof(val))) 3898 return -EFAULT; 3899 3900 lock_sock(sk); 3901 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3902 ret = -EBUSY; 3903 } else { 3904 po->tp_tx_has_off = !!val; 3905 ret = 0; 3906 } 3907 release_sock(sk); 3908 return 0; 3909 } 3910 case 
PACKET_QDISC_BYPASS: 3911 { 3912 int val; 3913 3914 if (optlen != sizeof(val)) 3915 return -EINVAL; 3916 if (copy_from_sockptr(&val, optval, sizeof(val))) 3917 return -EFAULT; 3918 3919 po->xmit = val ? packet_direct_xmit : dev_queue_xmit; 3920 return 0; 3921 } 3922 default: 3923 return -ENOPROTOOPT; 3924 } 3925 } 3926 3927 static int packet_getsockopt(struct socket *sock, int level, int optname, 3928 char __user *optval, int __user *optlen) 3929 { 3930 int len; 3931 int val, lv = sizeof(val); 3932 struct sock *sk = sock->sk; 3933 struct packet_sock *po = pkt_sk(sk); 3934 void *data = &val; 3935 union tpacket_stats_u st; 3936 struct tpacket_rollover_stats rstats; 3937 int drops; 3938 3939 if (level != SOL_PACKET) 3940 return -ENOPROTOOPT; 3941 3942 if (get_user(len, optlen)) 3943 return -EFAULT; 3944 3945 if (len < 0) 3946 return -EINVAL; 3947 3948 switch (optname) { 3949 case PACKET_STATISTICS: 3950 spin_lock_bh(&sk->sk_receive_queue.lock); 3951 memcpy(&st, &po->stats, sizeof(st)); 3952 memset(&po->stats, 0, sizeof(po->stats)); 3953 spin_unlock_bh(&sk->sk_receive_queue.lock); 3954 drops = atomic_xchg(&po->tp_drops, 0); 3955 3956 if (po->tp_version == TPACKET_V3) { 3957 lv = sizeof(struct tpacket_stats_v3); 3958 st.stats3.tp_drops = drops; 3959 st.stats3.tp_packets += drops; 3960 data = &st.stats3; 3961 } else { 3962 lv = sizeof(struct tpacket_stats); 3963 st.stats1.tp_drops = drops; 3964 st.stats1.tp_packets += drops; 3965 data = &st.stats1; 3966 } 3967 3968 break; 3969 case PACKET_AUXDATA: 3970 val = po->auxdata; 3971 break; 3972 case PACKET_ORIGDEV: 3973 val = po->origdev; 3974 break; 3975 case PACKET_VNET_HDR: 3976 val = po->has_vnet_hdr; 3977 break; 3978 case PACKET_VERSION: 3979 val = po->tp_version; 3980 break; 3981 case PACKET_HDRLEN: 3982 if (len > sizeof(int)) 3983 len = sizeof(int); 3984 if (len < sizeof(int)) 3985 return -EINVAL; 3986 if (copy_from_user(&val, optval, len)) 3987 return -EFAULT; 3988 switch (val) { 3989 case TPACKET_V1: 3990 val = sizeof(struct tpacket_hdr); 3991 break; 3992 case TPACKET_V2: 3993 val = sizeof(struct tpacket2_hdr); 3994 break; 3995 case TPACKET_V3: 3996 val = sizeof(struct tpacket3_hdr); 3997 break; 3998 default: 3999 return -EINVAL; 4000 } 4001 break; 4002 case PACKET_RESERVE: 4003 val = po->tp_reserve; 4004 break; 4005 case PACKET_LOSS: 4006 val = po->tp_loss; 4007 break; 4008 case PACKET_TIMESTAMP: 4009 val = po->tp_tstamp; 4010 break; 4011 case PACKET_FANOUT: 4012 val = (po->fanout ? 
4013 ((u32)po->fanout->id | 4014 ((u32)po->fanout->type << 16) | 4015 ((u32)po->fanout->flags << 24)) : 4016 0); 4017 break; 4018 case PACKET_IGNORE_OUTGOING: 4019 val = po->prot_hook.ignore_outgoing; 4020 break; 4021 case PACKET_ROLLOVER_STATS: 4022 if (!po->rollover) 4023 return -EINVAL; 4024 rstats.tp_all = atomic_long_read(&po->rollover->num); 4025 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 4026 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 4027 data = &rstats; 4028 lv = sizeof(rstats); 4029 break; 4030 case PACKET_TX_HAS_OFF: 4031 val = po->tp_tx_has_off; 4032 break; 4033 case PACKET_QDISC_BYPASS: 4034 val = packet_use_direct_xmit(po); 4035 break; 4036 default: 4037 return -ENOPROTOOPT; 4038 } 4039 4040 if (len > lv) 4041 len = lv; 4042 if (put_user(len, optlen)) 4043 return -EFAULT; 4044 if (copy_to_user(optval, data, len)) 4045 return -EFAULT; 4046 return 0; 4047 } 4048 4049 static int packet_notifier(struct notifier_block *this, 4050 unsigned long msg, void *ptr) 4051 { 4052 struct sock *sk; 4053 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4054 struct net *net = dev_net(dev); 4055 4056 rcu_read_lock(); 4057 sk_for_each_rcu(sk, &net->packet.sklist) { 4058 struct packet_sock *po = pkt_sk(sk); 4059 4060 switch (msg) { 4061 case NETDEV_UNREGISTER: 4062 if (po->mclist) 4063 packet_dev_mclist_delete(dev, &po->mclist); 4064 /* fallthrough */ 4065 4066 case NETDEV_DOWN: 4067 if (dev->ifindex == po->ifindex) { 4068 spin_lock(&po->bind_lock); 4069 if (po->running) { 4070 __unregister_prot_hook(sk, false); 4071 sk->sk_err = ENETDOWN; 4072 if (!sock_flag(sk, SOCK_DEAD)) 4073 sk->sk_error_report(sk); 4074 } 4075 if (msg == NETDEV_UNREGISTER) { 4076 packet_cached_dev_reset(po); 4077 po->ifindex = -1; 4078 if (po->prot_hook.dev) 4079 dev_put(po->prot_hook.dev); 4080 po->prot_hook.dev = NULL; 4081 } 4082 spin_unlock(&po->bind_lock); 4083 } 4084 break; 4085 case NETDEV_UP: 4086 if (dev->ifindex == po->ifindex) { 4087 spin_lock(&po->bind_lock); 4088 if (po->num) 4089 register_prot_hook(sk); 4090 spin_unlock(&po->bind_lock); 4091 } 4092 break; 4093 } 4094 } 4095 rcu_read_unlock(); 4096 return NOTIFY_DONE; 4097 } 4098 4099 4100 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4101 unsigned long arg) 4102 { 4103 struct sock *sk = sock->sk; 4104 4105 switch (cmd) { 4106 case SIOCOUTQ: 4107 { 4108 int amount = sk_wmem_alloc_get(sk); 4109 4110 return put_user(amount, (int __user *)arg); 4111 } 4112 case SIOCINQ: 4113 { 4114 struct sk_buff *skb; 4115 int amount = 0; 4116 4117 spin_lock_bh(&sk->sk_receive_queue.lock); 4118 skb = skb_peek(&sk->sk_receive_queue); 4119 if (skb) 4120 amount = skb->len; 4121 spin_unlock_bh(&sk->sk_receive_queue.lock); 4122 return put_user(amount, (int __user *)arg); 4123 } 4124 #ifdef CONFIG_INET 4125 case SIOCADDRT: 4126 case SIOCDELRT: 4127 case SIOCDARP: 4128 case SIOCGARP: 4129 case SIOCSARP: 4130 case SIOCGIFADDR: 4131 case SIOCSIFADDR: 4132 case SIOCGIFBRDADDR: 4133 case SIOCSIFBRDADDR: 4134 case SIOCGIFNETMASK: 4135 case SIOCSIFNETMASK: 4136 case SIOCGIFDSTADDR: 4137 case SIOCSIFDSTADDR: 4138 case SIOCSIFFLAGS: 4139 return inet_dgram_ops.ioctl(sock, cmd, arg); 4140 #endif 4141 4142 default: 4143 return -ENOIOCTLCMD; 4144 } 4145 return 0; 4146 } 4147 4148 static __poll_t packet_poll(struct file *file, struct socket *sock, 4149 poll_table *wait) 4150 { 4151 struct sock *sk = sock->sk; 4152 struct packet_sock *po = pkt_sk(sk); 4153 __poll_t mask = datagram_poll(file, sock, wait); 4154 4155 
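	/* On top of plain datagram readiness, a mapped ring adds its own
	 * conditions below: the RX ring reports EPOLLIN once the slot just
	 * behind 'head' is no longer owned by the kernel, and the TX ring
	 * reports EPOLLOUT while the current slot is still free to be
	 * filled.  A ring user therefore typically sleeps in poll() between
	 * walks of the ring instead of issuing a recvmsg() per packet.
	 */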
spin_lock_bh(&sk->sk_receive_queue.lock); 4156 if (po->rx_ring.pg_vec) { 4157 if (!packet_previous_rx_frame(po, &po->rx_ring, 4158 TP_STATUS_KERNEL)) 4159 mask |= EPOLLIN | EPOLLRDNORM; 4160 } 4161 packet_rcv_try_clear_pressure(po); 4162 spin_unlock_bh(&sk->sk_receive_queue.lock); 4163 spin_lock_bh(&sk->sk_write_queue.lock); 4164 if (po->tx_ring.pg_vec) { 4165 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 4166 mask |= EPOLLOUT | EPOLLWRNORM; 4167 } 4168 spin_unlock_bh(&sk->sk_write_queue.lock); 4169 return mask; 4170 } 4171 4172 4173 /* Dirty? Well, I still did not learn better way to account 4174 * for user mmaps. 4175 */ 4176 4177 static void packet_mm_open(struct vm_area_struct *vma) 4178 { 4179 struct file *file = vma->vm_file; 4180 struct socket *sock = file->private_data; 4181 struct sock *sk = sock->sk; 4182 4183 if (sk) 4184 atomic_inc(&pkt_sk(sk)->mapped); 4185 } 4186 4187 static void packet_mm_close(struct vm_area_struct *vma) 4188 { 4189 struct file *file = vma->vm_file; 4190 struct socket *sock = file->private_data; 4191 struct sock *sk = sock->sk; 4192 4193 if (sk) 4194 atomic_dec(&pkt_sk(sk)->mapped); 4195 } 4196 4197 static const struct vm_operations_struct packet_mmap_ops = { 4198 .open = packet_mm_open, 4199 .close = packet_mm_close, 4200 }; 4201 4202 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 4203 unsigned int len) 4204 { 4205 int i; 4206 4207 for (i = 0; i < len; i++) { 4208 if (likely(pg_vec[i].buffer)) { 4209 if (is_vmalloc_addr(pg_vec[i].buffer)) 4210 vfree(pg_vec[i].buffer); 4211 else 4212 free_pages((unsigned long)pg_vec[i].buffer, 4213 order); 4214 pg_vec[i].buffer = NULL; 4215 } 4216 } 4217 kfree(pg_vec); 4218 } 4219 4220 static char *alloc_one_pg_vec_page(unsigned long order) 4221 { 4222 char *buffer; 4223 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 4224 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 4225 4226 buffer = (char *) __get_free_pages(gfp_flags, order); 4227 if (buffer) 4228 return buffer; 4229 4230 /* __get_free_pages failed, fall back to vmalloc */ 4231 buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); 4232 if (buffer) 4233 return buffer; 4234 4235 /* vmalloc failed, lets dig into swap here */ 4236 gfp_flags &= ~__GFP_NORETRY; 4237 buffer = (char *) __get_free_pages(gfp_flags, order); 4238 if (buffer) 4239 return buffer; 4240 4241 /* complete and utter failure */ 4242 return NULL; 4243 } 4244 4245 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 4246 { 4247 unsigned int block_nr = req->tp_block_nr; 4248 struct pgv *pg_vec; 4249 int i; 4250 4251 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN); 4252 if (unlikely(!pg_vec)) 4253 goto out; 4254 4255 for (i = 0; i < block_nr; i++) { 4256 pg_vec[i].buffer = alloc_one_pg_vec_page(order); 4257 if (unlikely(!pg_vec[i].buffer)) 4258 goto out_free_pgvec; 4259 } 4260 4261 out: 4262 return pg_vec; 4263 4264 out_free_pgvec: 4265 free_pg_vec(pg_vec, order, block_nr); 4266 pg_vec = NULL; 4267 goto out; 4268 } 4269 4270 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4271 int closing, int tx_ring) 4272 { 4273 struct pgv *pg_vec = NULL; 4274 struct packet_sock *po = pkt_sk(sk); 4275 unsigned long *rx_owner_map = NULL; 4276 int was_running, order = 0; 4277 struct packet_ring_buffer *rb; 4278 struct sk_buff_head *rb_queue; 4279 __be16 num; 4280 int err; 4281 /* Added to avoid minimal code churn */ 4282 struct tpacket_req *req = &req_u->req; 4283 4284 rb = tx_ring ? 
/* Install a new rx/tx ring (req->tp_block_nr != 0) or release the existing
 * one (all-zero request).  Used by PACKET_RX_RING/PACKET_TX_RING and on
 * socket close.
 */
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Alias for the version-independent part of the request,
	 * kept to minimize code churn below.
	 */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	} else {
		/* Done with setup: tp_block_nr == 0 releases any existing
		 * ring, so a non-zero tp_frame_nr makes no sense here.
		 */
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}


	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	bitmap_free(rx_owner_map);
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
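
/*
 * Example (user space): tearing an existing ring down, which exercises
 * the tp_block_nr == 0 path of packet_set_ring() above.  A minimal
 * sketch, not part of the kernel build; the helper name and parameters
 * are illustrative.  The ring must be unmapped first, otherwise the
 * request fails with EBUSY, and a ring cannot be resized in place: it
 * has to be released before a new geometry is installed.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int release_rx_ring(int fd, void *ring, size_t ring_len)
 *	{
 *		struct tpacket_req req;
 *
 *		if (ring && munmap(ring, ring_len) < 0)
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));	// all-zero request frees the ring
 *		return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
 *				  &req, sizeof(req));
 *	}
 */
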
/* Map every configured ring (rx first, then tx) into one contiguous VMA.
 * The caller must mmap with offset 0 and a length that exactly matches the
 * combined size of the configured rings.
 */
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
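
/*
 * Example (user space): mapping the ring exposed by packet_mmap() above
 * and walking received frames.  A minimal TPACKET_V2 sketch, not part
 * of the kernel build; the helper name is illustrative, it assumes only
 * an RX ring was configured (the mapping must cover every configured
 * ring), "req" is the tpacket_req used to set it up, and a real
 * consumer would also need the appropriate memory barriers around
 * tp_status.
 *
 *	#include <poll.h>
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <linux/if_packet.h>
 *
 *	static void rx_loop(int fd, const struct tpacket_req *req)
 *	{
 *		size_t len = (size_t)req->tp_block_size * req->tp_block_nr;
 *		unsigned int fpb = req->tp_block_size / req->tp_frame_size;
 *		uint8_t *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				     MAP_SHARED, fd, 0);
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		unsigned int i = 0;
 *
 *		if (ring == MAP_FAILED)
 *			return;
 *		for (;;) {
 *			struct tpacket2_hdr *hdr = (void *)(ring +
 *				(i / fpb) * req->tp_block_size +
 *				(i % fpb) * req->tp_frame_size);
 *
 *			if (!(hdr->tp_status & TP_STATUS_USER)) {
 *				poll(&pfd, 1, -1);	// wait, see packet_poll()
 *				continue;
 *			}
 *			// frame data starts at (uint8_t *)hdr + hdr->tp_mac
 *			hdr->tp_status = TP_STATUS_KERNEL;	// hand frame back
 *			i = (i + 1) % req->tp_frame_nr;
 *		}
 *	}
 */
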
static const struct proto_ops packet_ops_spkt = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind_spkt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname_spkt,
	.poll = datagram_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = packet_sendmsg_spkt,
	.recvmsg = packet_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname,
	.poll = packet_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = packet_setsockopt,
	.getsockopt = packet_getsockopt,
	.sendmsg = packet_sendmsg,
	.recvmsg = packet_recvmsg,
	.mmap = packet_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family = PF_PACKET,
	.create = packet_create,
	.owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start = packet_seq_start,
	.next = packet_seq_next,
	.stop = packet_seq_stop,
	.show = packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
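
/*
 * Example (user space): the plain, non-ring use of the packet_ops
 * registered above - create a PF_PACKET socket, bind it to a single
 * interface and read whole link-layer frames with recvfrom().  A
 * minimal sketch, not part of the kernel build; the helper name is
 * illustrative, the interface name is supplied by the caller, and
 * CAP_NET_RAW is required.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int capture_one(const char *ifname)
 *	{
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		struct sockaddr_ll sll;
 *		unsigned char frame[ETH_FRAME_LEN];
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sll, 0, sizeof(sll));
 *		sll.sll_family   = AF_PACKET;
 *		sll.sll_protocol = htons(ETH_P_ALL);
 *		sll.sll_ifindex  = if_nametoindex(ifname);
 *		if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		n = recvfrom(fd, frame, sizeof(frame), 0, NULL, NULL);
 *		close(fd);
 *		return n < 0 ? -1 : (int)n;	// full frame incl. ll header
 *	}
 */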