// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *	Johann Baudy		:	Added TX RING.
 *	Chetan Loke		:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes
     the ll header inside itself. In this case the ll header is invisible
     outside of the device, but higher levels still should reserve
     dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     does not fit into the reserved space (tunnels); others are not (PPP).
   - Packet sockets receive packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header. PPP does this, which is wrong, because it
		 introduces asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
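/*
 * Editor's illustration (an assumption-laden sketch, not part of this
 * file): the visibility rules above as seen from userspace on an
 * Ethernet device.  SOCK_RAW hands over the frame with its ll header
 * in place; SOCK_DGRAM strips it and reports it via sockaddr_ll:
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgr = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	char buf[2048];
 *
 *	recvfrom(raw, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &slen);
 *		- buf[0] is the first byte of the Ethernet header
 *	recvfrom(dgr, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &slen);
 *		- buf[0] is the first byte of the network header;
 *		  sll.sll_addr/sll.sll_halen carry the link-layer address
 */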
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
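/*
 * Editor's sketch of the resulting TPACKET_V3 block layout, as implied
 * by the macros above: a block starts with a tpacket_block_desc padded
 * to V3_ALIGNMENT, optionally followed by a user-private area of
 * tp_sizeof_priv bytes (also padded), then the packets, each beginning
 * with a tpacket3_hdr:
 *
 *	+-------------------------------+ 0
 *	| tpacket_block_desc            |
 *	+-------------------------------+ BLK_HDR_LEN
 *	| private area (tp_sizeof_priv) |
 *	+-------------------------------+ BLK_PLUS_PRIV(tp_sizeof_priv)
 *	| pkt 0 (tpacket3_hdr + data)   |
 *	| pkt 1 ...                     |
 *	+-------------------------------+ tp_block_size
 *
 * E.g. with tp_sizeof_priv == 13, ALIGN(13, 8) == 16, so the first
 * packet lands 16 bytes past the (aligned) block header.
 */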
struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}
/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
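/*
 * Editor's note: tp_status is the ownership word of a ring frame and is
 * what the two helpers above publish/consume.  The kernel fills a frame
 * and sets TP_STATUS_USER; userspace processes it and writes
 * TP_STATUS_KERNEL (RX) or TP_STATUS_SEND_REQUEST (TX) back.  The
 * smp_wmb()/smp_rmb() pair orders the status word against the frame
 * payload on both sides.  A minimal user-side RX consume step, under
 * those assumptions, looks like:
 *
 *	struct tpacket2_hdr *hdr = frame;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	... read hdr->tp_snaplen bytes at frame + hdr->tp_mac ...
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */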
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (ecmd.base.speed < SPEED_1000 ||
		    ecmd.base.speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = ecmd.base.speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
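/*
 * Editor's worked example of the arithmetic above: a 1 MiB block gives
 * mbits = (1048576 * 8) / (1024 * 1024) = 8.  On a 1 Gb/s link
 * (div = 1, msec = 1) that yields tmo = 8 and we return 9 ms, roughly
 * the time to fill one block at line rate plus a little slack.  On a
 * 10 Gb/s link div = 10, so mbits /= div truncates to 0 and we
 * return 1 ms.
 */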
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}
/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd  = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}
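/*
 * Editor's sketch of the matching consumer side (assumptions as in
 * Documentation/networking/packet_mmap.rst; handle() is a placeholder):
 * userspace waits for a block to flip to TP_STATUS_USER, walks its
 * packets via tp_next_offset, then hands the block back:
 *
 *	struct tpacket_block_desc *bd = block;
 *	struct tpacket3_hdr *p;
 *	unsigned int i;
 *
 *	while (!(bd->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	p = (void *)bd + bd->hdr.bh1.offset_to_first_pkt;
 *	for (i = 0; i < bd->hdr.bh1.num_pkts; i++) {
 *		handle((char *)p + p->tp_mac, p->tp_snaplen);
 *		p = (void *)p + p->tp_next_offset;
 *	}
 *	bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 */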
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
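/*
 * Editor's worked example: with ROOM_POW_OFF == 2 and a 16-frame ring
 * (frame_max == 15, so len == 16), __tpacket_has_room(po, ROOM_POW_OFF)
 * probes the frame len >> 2 == 4 slots ahead of head; if that slot is
 * still TP_STATUS_KERNEL, at least a quarter of the ring is free and
 * the caller reports ROOM_NORMAL.  With pow_off == 0 the probe is the
 * head slot itself, i.e. "is there room for even one more frame",
 * which separates ROOM_LOW from ROOM_NONE.
 */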
static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int pressure, ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (READ_ONCE(po->pressure) != pressure)
		WRITE_ONCE(po->pressure, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (READ_ONCE(po->pressure) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = prandom_u32() % ROLLOVER_HLEN;

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}
static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}
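/*
 * Editor's sketch of group setup from userspace: the PACKET_FANOUT
 * optval packs the group id into the low 16 bits and type_flags into
 * the high 16; fanout_add() above further splits type_flags into the
 * type (low byte) and flags (high byte):
 *
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		       &val, sizeof(val)) < 0)
 *		...;
 *
 * Each member socket issues this after binding; the first call creates
 * the group, and later joiners must match its type and flags or the
 * call fails with -EINVAL.
 */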
/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from the fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	skb_probe_transport_header(skb);
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}
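/*
 * Editor's note: the filter consulted by run_filter() is the socket
 * filter attached with SO_ATTACH_FILTER (classic BPF) or SO_ATTACH_BPF.
 * Its return value is a byte budget: 0 means drop, and any other value
 * caps how many bytes the callers deliver (they clamp snaplen to it),
 * which is why tcpdump-style filters return 0 or 0xffffffff.
 */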
2079 */ 2080 if (sk->sk_type != SOCK_DGRAM) 2081 skb_push(skb, skb->data - skb_mac_header(skb)); 2082 else if (skb->pkt_type == PACKET_OUTGOING) { 2083 /* Special case: outgoing packets have ll header at head */ 2084 skb_pull(skb, skb_network_offset(skb)); 2085 } 2086 } 2087 2088 snaplen = skb->len; 2089 2090 res = run_filter(skb, sk, snaplen); 2091 if (!res) 2092 goto drop_n_restore; 2093 if (snaplen > res) 2094 snaplen = res; 2095 2096 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2097 goto drop_n_acct; 2098 2099 if (skb_shared(skb)) { 2100 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2101 if (nskb == NULL) 2102 goto drop_n_acct; 2103 2104 if (skb_head != skb->data) { 2105 skb->data = skb_head; 2106 skb->len = skb_len; 2107 } 2108 consume_skb(skb); 2109 skb = nskb; 2110 } 2111 2112 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2113 2114 sll = &PACKET_SKB_CB(skb)->sa.ll; 2115 sll->sll_hatype = dev->type; 2116 sll->sll_pkttype = skb->pkt_type; 2117 if (unlikely(po->origdev)) 2118 sll->sll_ifindex = orig_dev->ifindex; 2119 else 2120 sll->sll_ifindex = dev->ifindex; 2121 2122 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2123 2124 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2125 * Use their space for storing the original skb length. 2126 */ 2127 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2128 2129 if (pskb_trim(skb, snaplen)) 2130 goto drop_n_acct; 2131 2132 skb_set_owner_r(skb, sk); 2133 skb->dev = NULL; 2134 skb_dst_drop(skb); 2135 2136 /* drop conntrack reference */ 2137 nf_reset_ct(skb); 2138 2139 spin_lock(&sk->sk_receive_queue.lock); 2140 po->stats.stats1.tp_packets++; 2141 sock_skb_set_dropcount(sk, skb); 2142 __skb_queue_tail(&sk->sk_receive_queue, skb); 2143 spin_unlock(&sk->sk_receive_queue.lock); 2144 sk->sk_data_ready(sk); 2145 return 0; 2146 2147 drop_n_acct: 2148 is_drop_n_account = true; 2149 atomic_inc(&po->tp_drops); 2150 atomic_inc(&sk->sk_drops); 2151 2152 drop_n_restore: 2153 if (skb_head != skb->data && skb_shared(skb)) { 2154 skb->data = skb_head; 2155 skb->len = skb_len; 2156 } 2157 drop: 2158 if (!is_drop_n_account) 2159 consume_skb(skb); 2160 else 2161 kfree_skb(skb); 2162 return 0; 2163 } 2164 2165 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2166 struct packet_type *pt, struct net_device *orig_dev) 2167 { 2168 struct sock *sk; 2169 struct packet_sock *po; 2170 struct sockaddr_ll *sll; 2171 union tpacket_uhdr h; 2172 u8 *skb_head = skb->data; 2173 int skb_len = skb->len; 2174 unsigned int snaplen, res; 2175 unsigned long status = TP_STATUS_USER; 2176 unsigned short macoff, netoff, hdrlen; 2177 struct sk_buff *copy_skb = NULL; 2178 struct timespec64 ts; 2179 __u32 ts_status; 2180 bool is_drop_n_account = false; 2181 bool do_vnet = false; 2182 2183 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2184 * We may add members to them until current aligned size without forcing 2185 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
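 * (The BUILD_BUG_ONs just below pin the current padded sizes, 32 and
 * 48 bytes, so that growing a header past its aligned size fails at
 * compile time instead of silently changing the mmap'ed layout.)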
2186 */ 2187 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2188 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2189 2190 if (skb->pkt_type == PACKET_LOOPBACK) 2191 goto drop; 2192 2193 sk = pt->af_packet_priv; 2194 po = pkt_sk(sk); 2195 2196 if (!net_eq(dev_net(dev), sock_net(sk))) 2197 goto drop; 2198 2199 if (dev->header_ops) { 2200 if (sk->sk_type != SOCK_DGRAM) 2201 skb_push(skb, skb->data - skb_mac_header(skb)); 2202 else if (skb->pkt_type == PACKET_OUTGOING) { 2203 /* Special case: outgoing packets have ll header at head */ 2204 skb_pull(skb, skb_network_offset(skb)); 2205 } 2206 } 2207 2208 snaplen = skb->len; 2209 2210 res = run_filter(skb, sk, snaplen); 2211 if (!res) 2212 goto drop_n_restore; 2213 2214 /* If we are flooded, just give up */ 2215 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { 2216 atomic_inc(&po->tp_drops); 2217 goto drop_n_restore; 2218 } 2219 2220 if (skb->ip_summed == CHECKSUM_PARTIAL) 2221 status |= TP_STATUS_CSUMNOTREADY; 2222 else if (skb->pkt_type != PACKET_OUTGOING && 2223 (skb->ip_summed == CHECKSUM_COMPLETE || 2224 skb_csum_unnecessary(skb))) 2225 status |= TP_STATUS_CSUM_VALID; 2226 2227 if (snaplen > res) 2228 snaplen = res; 2229 2230 if (sk->sk_type == SOCK_DGRAM) { 2231 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2232 po->tp_reserve; 2233 } else { 2234 unsigned int maclen = skb_network_offset(skb); 2235 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2236 (maclen < 16 ? 16 : maclen)) + 2237 po->tp_reserve; 2238 if (po->has_vnet_hdr) { 2239 netoff += sizeof(struct virtio_net_hdr); 2240 do_vnet = true; 2241 } 2242 macoff = netoff - maclen; 2243 } 2244 if (po->tp_version <= TPACKET_V2) { 2245 if (macoff + snaplen > po->rx_ring.frame_size) { 2246 if (po->copy_thresh && 2247 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2248 if (skb_shared(skb)) { 2249 copy_skb = skb_clone(skb, GFP_ATOMIC); 2250 } else { 2251 copy_skb = skb_get(skb); 2252 skb_head = skb->data; 2253 } 2254 if (copy_skb) 2255 skb_set_owner_r(copy_skb, sk); 2256 } 2257 snaplen = po->rx_ring.frame_size - macoff; 2258 if ((int)snaplen < 0) { 2259 snaplen = 0; 2260 do_vnet = false; 2261 } 2262 } 2263 } else if (unlikely(macoff + snaplen > 2264 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2265 u32 nval; 2266 2267 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2268 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", 2269 snaplen, nval, macoff); 2270 snaplen = nval; 2271 if (unlikely((int)snaplen < 0)) { 2272 snaplen = 0; 2273 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2274 do_vnet = false; 2275 } 2276 } 2277 spin_lock(&sk->sk_receive_queue.lock); 2278 h.raw = packet_current_rx_frame(po, skb, 2279 TP_STATUS_KERNEL, (macoff+snaplen)); 2280 if (!h.raw) 2281 goto drop_n_account; 2282 if (po->tp_version <= TPACKET_V2) { 2283 packet_increment_rx_head(po, &po->rx_ring); 2284 /* 2285 * LOSING will be reported till you read the stats, 2286 * because it's COR - Clear On Read. 2287 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2288 * at packet level. 
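 * (tp_drops is cleared by getsockopt(PACKET_STATISTICS), see
 * packet_getsockopt() below, so TP_STATUS_LOSING stops being set once
 * the stats have been read.)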
2289 */ 2290 if (atomic_read(&po->tp_drops)) 2291 status |= TP_STATUS_LOSING; 2292 } 2293 2294 if (do_vnet && 2295 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2296 sizeof(struct virtio_net_hdr), 2297 vio_le(), true, 0)) 2298 goto drop_n_account; 2299 2300 po->stats.stats1.tp_packets++; 2301 if (copy_skb) { 2302 status |= TP_STATUS_COPY; 2303 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2304 } 2305 spin_unlock(&sk->sk_receive_queue.lock); 2306 2307 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2308 2309 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) 2310 ktime_get_real_ts64(&ts); 2311 2312 status |= ts_status; 2313 2314 switch (po->tp_version) { 2315 case TPACKET_V1: 2316 h.h1->tp_len = skb->len; 2317 h.h1->tp_snaplen = snaplen; 2318 h.h1->tp_mac = macoff; 2319 h.h1->tp_net = netoff; 2320 h.h1->tp_sec = ts.tv_sec; 2321 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2322 hdrlen = sizeof(*h.h1); 2323 break; 2324 case TPACKET_V2: 2325 h.h2->tp_len = skb->len; 2326 h.h2->tp_snaplen = snaplen; 2327 h.h2->tp_mac = macoff; 2328 h.h2->tp_net = netoff; 2329 h.h2->tp_sec = ts.tv_sec; 2330 h.h2->tp_nsec = ts.tv_nsec; 2331 if (skb_vlan_tag_present(skb)) { 2332 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2333 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2334 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2335 } else { 2336 h.h2->tp_vlan_tci = 0; 2337 h.h2->tp_vlan_tpid = 0; 2338 } 2339 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2340 hdrlen = sizeof(*h.h2); 2341 break; 2342 case TPACKET_V3: 2343 /* tp_next_offset and vlan are already populated above, 2344 * so DON'T clear those fields here. 2345 */ 2346 h.h3->tp_status |= status; 2347 h.h3->tp_len = skb->len; 2348 h.h3->tp_snaplen = snaplen; 2349 h.h3->tp_mac = macoff; 2350 h.h3->tp_net = netoff; 2351 h.h3->tp_sec = ts.tv_sec; 2352 h.h3->tp_nsec = ts.tv_nsec; 2353 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2354 hdrlen = sizeof(*h.h3); 2355 break; 2356 default: 2357 BUG(); 2358 } 2359 2360 sll = h.raw + TPACKET_ALIGN(hdrlen); 2361 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2362 sll->sll_family = AF_PACKET; 2363 sll->sll_hatype = dev->type; 2364 sll->sll_protocol = skb->protocol; 2365 sll->sll_pkttype = skb->pkt_type; 2366 if (unlikely(po->origdev)) 2367 sll->sll_ifindex = orig_dev->ifindex; 2368 else 2369 sll->sll_ifindex = dev->ifindex; 2370 2371 smp_mb(); 2372 2373 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2374 if (po->tp_version <= TPACKET_V2) { 2375 u8 *start, *end; 2376 2377 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2378 macoff + snaplen); 2379 2380 for (start = h.raw; start < end; start += PAGE_SIZE) 2381 flush_dcache_page(pgv_to_page(start)); 2382 } 2383 smp_wmb(); 2384 #endif 2385 2386 if (po->tp_version <= TPACKET_V2) { 2387 __packet_set_status(po, h.raw, status); 2388 sk->sk_data_ready(sk); 2389 } else { 2390 prb_clear_blk_fill_status(&po->rx_ring); 2391 } 2392 2393 drop_n_restore: 2394 if (skb_head != skb->data && skb_shared(skb)) { 2395 skb->data = skb_head; 2396 skb->len = skb_len; 2397 } 2398 drop: 2399 if (!is_drop_n_account) 2400 consume_skb(skb); 2401 else 2402 kfree_skb(skb); 2403 return 0; 2404 2405 drop_n_account: 2406 spin_unlock(&sk->sk_receive_queue.lock); 2407 atomic_inc(&po->tp_drops); 2408 is_drop_n_account = true; 2409 2410 sk->sk_data_ready(sk); 2411 kfree_skb(copy_skb); 2412 goto drop_n_restore; 2413 } 2414 2415 static void tpacket_destruct_skb(struct sk_buff *skb) 2416 { 2417 struct packet_sock *po = pkt_sk(skb->sk); 2418 2419 if
(likely(po->tx_ring.pg_vec)) { 2420 void *ph; 2421 __u32 ts; 2422 2423 ph = skb_zcopy_get_nouarg(skb); 2424 packet_dec_pending(&po->tx_ring); 2425 2426 ts = __packet_set_timestamp(po, ph, skb); 2427 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2428 2429 if (!packet_read_pending(&po->tx_ring)) 2430 complete(&po->skb_completion); 2431 } 2432 2433 sock_wfree(skb); 2434 } 2435 2436 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2437 { 2438 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2439 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2440 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2441 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2442 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2443 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2444 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2445 2446 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2447 return -EINVAL; 2448 2449 return 0; 2450 } 2451 2452 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2453 struct virtio_net_hdr *vnet_hdr) 2454 { 2455 if (*len < sizeof(*vnet_hdr)) 2456 return -EINVAL; 2457 *len -= sizeof(*vnet_hdr); 2458 2459 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2460 return -EFAULT; 2461 2462 return __packet_snd_vnet_parse(vnet_hdr, *len); 2463 } 2464 2465 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2466 void *frame, struct net_device *dev, void *data, int tp_len, 2467 __be16 proto, unsigned char *addr, int hlen, int copylen, 2468 const struct sockcm_cookie *sockc) 2469 { 2470 union tpacket_uhdr ph; 2471 int to_write, offset, len, nr_frags, len_max; 2472 struct socket *sock = po->sk.sk_socket; 2473 struct page *page; 2474 int err; 2475 2476 ph.raw = frame; 2477 2478 skb->protocol = proto; 2479 skb->dev = dev; 2480 skb->priority = po->sk.sk_priority; 2481 skb->mark = po->sk.sk_mark; 2482 skb->tstamp = sockc->transmit_time; 2483 skb_setup_tx_timestamp(skb, sockc->tsflags); 2484 skb_zcopy_set_nouarg(skb, ph.raw); 2485 2486 skb_reserve(skb, hlen); 2487 skb_reset_network_header(skb); 2488 2489 to_write = tp_len; 2490 2491 if (sock->type == SOCK_DGRAM) { 2492 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2493 NULL, tp_len); 2494 if (unlikely(err < 0)) 2495 return -EINVAL; 2496 } else if (copylen) { 2497 int hdrlen = min_t(int, copylen, tp_len); 2498 2499 skb_push(skb, dev->hard_header_len); 2500 skb_put(skb, copylen - dev->hard_header_len); 2501 err = skb_store_bits(skb, 0, data, hdrlen); 2502 if (unlikely(err)) 2503 return err; 2504 if (!dev_validate_header(dev, skb->data, hdrlen)) 2505 return -EINVAL; 2506 2507 data += hdrlen; 2508 to_write -= hdrlen; 2509 } 2510 2511 offset = offset_in_page(data); 2512 len_max = PAGE_SIZE - offset; 2513 len = ((to_write > len_max) ? len_max : to_write); 2514 2515 skb->data_len = to_write; 2516 skb->len += to_write; 2517 skb->truesize += to_write; 2518 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2519 2520 while (likely(to_write)) { 2521 nr_frags = skb_shinfo(skb)->nr_frags; 2522 2523 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2524 pr_err("Packet exceeds the number of skb frags (%lu)\n", 2525 MAX_SKB_FRAGS); 2526 return -EFAULT; 2527 } 2528 2529 page = pgv_to_page(data); 2530 data += len; 2531 flush_dcache_page(page); 2532 get_page(page); 2533 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2534 to_write -= len; 2535 offset = 0; 2536 len_max = PAGE_SIZE; 2537 len = ((to_write > len_max) ?
len_max : to_write); 2538 } 2539 2540 packet_parse_headers(skb, sock); 2541 2542 return tp_len; 2543 } 2544 2545 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2546 int size_max, void **data) 2547 { 2548 union tpacket_uhdr ph; 2549 int tp_len, off; 2550 2551 ph.raw = frame; 2552 2553 switch (po->tp_version) { 2554 case TPACKET_V3: 2555 if (ph.h3->tp_next_offset != 0) { 2556 pr_warn_once("variable sized slot not supported"); 2557 return -EINVAL; 2558 } 2559 tp_len = ph.h3->tp_len; 2560 break; 2561 case TPACKET_V2: 2562 tp_len = ph.h2->tp_len; 2563 break; 2564 default: 2565 tp_len = ph.h1->tp_len; 2566 break; 2567 } 2568 if (unlikely(tp_len > size_max)) { 2569 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2570 return -EMSGSIZE; 2571 } 2572 2573 if (unlikely(po->tp_tx_has_off)) { 2574 int off_min, off_max; 2575 2576 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2577 off_max = po->tx_ring.frame_size - tp_len; 2578 if (po->sk.sk_type == SOCK_DGRAM) { 2579 switch (po->tp_version) { 2580 case TPACKET_V3: 2581 off = ph.h3->tp_net; 2582 break; 2583 case TPACKET_V2: 2584 off = ph.h2->tp_net; 2585 break; 2586 default: 2587 off = ph.h1->tp_net; 2588 break; 2589 } 2590 } else { 2591 switch (po->tp_version) { 2592 case TPACKET_V3: 2593 off = ph.h3->tp_mac; 2594 break; 2595 case TPACKET_V2: 2596 off = ph.h2->tp_mac; 2597 break; 2598 default: 2599 off = ph.h1->tp_mac; 2600 break; 2601 } 2602 } 2603 if (unlikely((off < off_min) || (off_max < off))) 2604 return -EINVAL; 2605 } else { 2606 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2607 } 2608 2609 *data = frame + off; 2610 return tp_len; 2611 } 2612 2613 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2614 { 2615 struct sk_buff *skb = NULL; 2616 struct net_device *dev; 2617 struct virtio_net_hdr *vnet_hdr = NULL; 2618 struct sockcm_cookie sockc; 2619 __be16 proto; 2620 int err, reserve = 0; 2621 void *ph; 2622 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2623 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2624 unsigned char *addr = NULL; 2625 int tp_len, size_max; 2626 void *data; 2627 int len_sum = 0; 2628 int status = TP_STATUS_AVAILABLE; 2629 int hlen, tlen, copylen = 0; 2630 long timeo = 0; 2631 2632 mutex_lock(&po->pg_vec_lock); 2633 2634 /* packet_sendmsg() check on tx_ring.pg_vec was lockless, 2635 * we need to confirm it under protection of pg_vec_lock. 
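 * (packet_set_ring() may tear the ring down concurrently; it takes
 * pg_vec_lock as well, which makes the re-check below race-free.)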
2636 */ 2637 if (unlikely(!po->tx_ring.pg_vec)) { 2638 err = -EBUSY; 2639 goto out; 2640 } 2641 if (likely(saddr == NULL)) { 2642 dev = packet_cached_dev_get(po); 2643 proto = po->num; 2644 } else { 2645 err = -EINVAL; 2646 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2647 goto out; 2648 if (msg->msg_namelen < (saddr->sll_halen 2649 + offsetof(struct sockaddr_ll, 2650 sll_addr))) 2651 goto out; 2652 proto = saddr->sll_protocol; 2653 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2654 if (po->sk.sk_socket->type == SOCK_DGRAM) { 2655 if (dev && msg->msg_namelen < dev->addr_len + 2656 offsetof(struct sockaddr_ll, sll_addr)) 2657 goto out_put; 2658 addr = saddr->sll_addr; 2659 } 2660 } 2661 2662 err = -ENXIO; 2663 if (unlikely(dev == NULL)) 2664 goto out; 2665 err = -ENETDOWN; 2666 if (unlikely(!(dev->flags & IFF_UP))) 2667 goto out_put; 2668 2669 sockcm_init(&sockc, &po->sk); 2670 if (msg->msg_controllen) { 2671 err = sock_cmsg_send(&po->sk, msg, &sockc); 2672 if (unlikely(err)) 2673 goto out_put; 2674 } 2675 2676 if (po->sk.sk_socket->type == SOCK_RAW) 2677 reserve = dev->hard_header_len; 2678 size_max = po->tx_ring.frame_size 2679 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2680 2681 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2682 size_max = dev->mtu + reserve + VLAN_HLEN; 2683 2684 reinit_completion(&po->skb_completion); 2685 2686 do { 2687 ph = packet_current_frame(po, &po->tx_ring, 2688 TP_STATUS_SEND_REQUEST); 2689 if (unlikely(ph == NULL)) { 2690 if (need_wait && skb) { 2691 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); 2692 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); 2693 if (timeo <= 0) { 2694 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; 2695 goto out_put; 2696 } 2697 } 2698 /* check for additional frames */ 2699 continue; 2700 } 2701 2702 skb = NULL; 2703 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2704 if (tp_len < 0) 2705 goto tpacket_error; 2706 2707 status = TP_STATUS_SEND_REQUEST; 2708 hlen = LL_RESERVED_SPACE(dev); 2709 tlen = dev->needed_tailroom; 2710 if (po->has_vnet_hdr) { 2711 vnet_hdr = data; 2712 data += sizeof(*vnet_hdr); 2713 tp_len -= sizeof(*vnet_hdr); 2714 if (tp_len < 0 || 2715 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2716 tp_len = -EINVAL; 2717 goto tpacket_error; 2718 } 2719 copylen = __virtio16_to_cpu(vio_le(), 2720 vnet_hdr->hdr_len); 2721 } 2722 copylen = max_t(int, copylen, dev->hard_header_len); 2723 skb = sock_alloc_send_skb(&po->sk, 2724 hlen + tlen + sizeof(struct sockaddr_ll) + 2725 (copylen - dev->hard_header_len), 2726 !need_wait, &err); 2727 2728 if (unlikely(skb == NULL)) { 2729 /* we assume the socket was initially writeable ... 
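 * so a NULL skb here means we ran out of buffer space mid-burst;
 * report the bytes already sent (len_sum) instead of an error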
*/ 2730 if (likely(len_sum > 0)) 2731 err = len_sum; 2732 goto out_status; 2733 } 2734 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2735 addr, hlen, copylen, &sockc); 2736 if (likely(tp_len >= 0) && 2737 tp_len > dev->mtu + reserve && 2738 !po->has_vnet_hdr && 2739 !packet_extra_vlan_len_allowed(dev, skb)) 2740 tp_len = -EMSGSIZE; 2741 2742 if (unlikely(tp_len < 0)) { 2743 tpacket_error: 2744 if (po->tp_loss) { 2745 __packet_set_status(po, ph, 2746 TP_STATUS_AVAILABLE); 2747 packet_increment_head(&po->tx_ring); 2748 kfree_skb(skb); 2749 continue; 2750 } else { 2751 status = TP_STATUS_WRONG_FORMAT; 2752 err = tp_len; 2753 goto out_status; 2754 } 2755 } 2756 2757 if (po->has_vnet_hdr) { 2758 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2759 tp_len = -EINVAL; 2760 goto tpacket_error; 2761 } 2762 virtio_net_hdr_set_proto(skb, vnet_hdr); 2763 } 2764 2765 skb->destructor = tpacket_destruct_skb; 2766 __packet_set_status(po, ph, TP_STATUS_SENDING); 2767 packet_inc_pending(&po->tx_ring); 2768 2769 status = TP_STATUS_SEND_REQUEST; 2770 err = po->xmit(skb); 2771 if (unlikely(err > 0)) { 2772 err = net_xmit_errno(err); 2773 if (err && __packet_get_status(po, ph) == 2774 TP_STATUS_AVAILABLE) { 2775 /* skb was destructed already */ 2776 skb = NULL; 2777 goto out_status; 2778 } 2779 /* 2780 * skb was dropped but not destructed yet; 2781 * let's treat it like congestion or err < 0 2782 */ 2783 err = 0; 2784 } 2785 packet_increment_head(&po->tx_ring); 2786 len_sum += tp_len; 2787 } while (likely((ph != NULL) || 2788 /* Note: packet_read_pending() might be slow if we have 2789 * to call it as it's per_cpu variable, but in fast-path 2790 * we already short-circuit the loop with the first 2791 * condition, and luckily don't have to go that path 2792 * anyway. 2793 */ 2794 (need_wait && packet_read_pending(&po->tx_ring)))); 2795 2796 err = len_sum; 2797 goto out_put; 2798 2799 out_status: 2800 __packet_set_status(po, ph, status); 2801 kfree_skb(skb); 2802 out_put: 2803 dev_put(dev); 2804 out: 2805 mutex_unlock(&po->pg_vec_lock); 2806 return err; 2807 } 2808 2809 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2810 size_t reserve, size_t len, 2811 size_t linear, int noblock, 2812 int *err) 2813 { 2814 struct sk_buff *skb; 2815 2816 /* Under a page? Don't bother with paged skb. */ 2817 if (prepad + len < PAGE_SIZE || !linear) 2818 linear = len; 2819 2820 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2821 err, 0); 2822 if (!skb) 2823 return NULL; 2824 2825 skb_reserve(skb, reserve); 2826 skb_put(skb, linear); 2827 skb->data_len = len - linear; 2828 skb->len += len - linear; 2829 2830 return skb; 2831 } 2832 2833 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2834 { 2835 struct sock *sk = sock->sk; 2836 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2837 struct sk_buff *skb; 2838 struct net_device *dev; 2839 __be16 proto; 2840 unsigned char *addr = NULL; 2841 int err, reserve = 0; 2842 struct sockcm_cookie sockc; 2843 struct virtio_net_hdr vnet_hdr = { 0 }; 2844 int offset = 0; 2845 struct packet_sock *po = pkt_sk(sk); 2846 bool has_vnet_hdr = false; 2847 int hlen, tlen, linear; 2848 int extra_len = 0; 2849 2850 /* 2851 * Get and verify the address. 
2852 */ 2853 2854 if (likely(saddr == NULL)) { 2855 dev = packet_cached_dev_get(po); 2856 proto = po->num; 2857 } else { 2858 err = -EINVAL; 2859 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2860 goto out; 2861 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2862 goto out; 2863 proto = saddr->sll_protocol; 2864 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2865 if (sock->type == SOCK_DGRAM) { 2866 if (dev && msg->msg_namelen < dev->addr_len + 2867 offsetof(struct sockaddr_ll, sll_addr)) 2868 goto out_unlock; 2869 addr = saddr->sll_addr; 2870 } 2871 } 2872 2873 err = -ENXIO; 2874 if (unlikely(dev == NULL)) 2875 goto out_unlock; 2876 err = -ENETDOWN; 2877 if (unlikely(!(dev->flags & IFF_UP))) 2878 goto out_unlock; 2879 2880 sockcm_init(&sockc, sk); 2881 sockc.mark = sk->sk_mark; 2882 if (msg->msg_controllen) { 2883 err = sock_cmsg_send(sk, msg, &sockc); 2884 if (unlikely(err)) 2885 goto out_unlock; 2886 } 2887 2888 if (sock->type == SOCK_RAW) 2889 reserve = dev->hard_header_len; 2890 if (po->has_vnet_hdr) { 2891 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2892 if (err) 2893 goto out_unlock; 2894 has_vnet_hdr = true; 2895 } 2896 2897 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2898 if (!netif_supports_nofcs(dev)) { 2899 err = -EPROTONOSUPPORT; 2900 goto out_unlock; 2901 } 2902 extra_len = 4; /* We're doing our own CRC */ 2903 } 2904 2905 err = -EMSGSIZE; 2906 if (!vnet_hdr.gso_type && 2907 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 2908 goto out_unlock; 2909 2910 err = -ENOBUFS; 2911 hlen = LL_RESERVED_SPACE(dev); 2912 tlen = dev->needed_tailroom; 2913 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2914 linear = max(linear, min_t(int, len, dev->hard_header_len)); 2915 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2916 msg->msg_flags & MSG_DONTWAIT, &err); 2917 if (skb == NULL) 2918 goto out_unlock; 2919 2920 skb_reset_network_header(skb); 2921 2922 err = -EINVAL; 2923 if (sock->type == SOCK_DGRAM) { 2924 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2925 if (unlikely(offset < 0)) 2926 goto out_free; 2927 } else if (reserve) { 2928 skb_reserve(skb, -reserve); 2929 if (len < reserve + sizeof(struct ipv6hdr) && 2930 dev->min_header_len != dev->hard_header_len) 2931 skb_reset_network_header(skb); 2932 } 2933 2934 /* Returns -EFAULT on error */ 2935 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 2936 if (err) 2937 goto out_free; 2938 2939 if (sock->type == SOCK_RAW && 2940 !dev_validate_header(dev, skb->data, len)) { 2941 err = -EINVAL; 2942 goto out_free; 2943 } 2944 2945 skb_setup_tx_timestamp(skb, sockc.tsflags); 2946 2947 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 2948 !packet_extra_vlan_len_allowed(dev, skb)) { 2949 err = -EMSGSIZE; 2950 goto out_free; 2951 } 2952 2953 skb->protocol = proto; 2954 skb->dev = dev; 2955 skb->priority = sk->sk_priority; 2956 skb->mark = sockc.mark; 2957 skb->tstamp = sockc.transmit_time; 2958 2959 if (has_vnet_hdr) { 2960 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2961 if (err) 2962 goto out_free; 2963 len += sizeof(vnet_hdr); 2964 virtio_net_hdr_set_proto(skb, &vnet_hdr); 2965 } 2966 2967 packet_parse_headers(skb, sock); 2968 2969 if (unlikely(extra_len == 4)) 2970 skb->no_fcs = 1; 2971 2972 err = po->xmit(skb); 2973 if (err > 0 && (err = net_xmit_errno(err)) != 0) 2974 goto out_unlock; 2975 2976 dev_put(dev); 2977 2978 return len; 2979 2980 out_free: 2981 kfree_skb(skb); 2982 out_unlock: 2983 
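	/* dev is NULL here when the device lookup failed; only drop a reference we actually took */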
if (dev) 2984 dev_put(dev); 2985 out: 2986 return err; 2987 } 2988 2989 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 2990 { 2991 struct sock *sk = sock->sk; 2992 struct packet_sock *po = pkt_sk(sk); 2993 2994 if (po->tx_ring.pg_vec) 2995 return tpacket_snd(po, msg); 2996 else 2997 return packet_snd(sock, msg, len); 2998 } 2999 3000 /* 3001 * Close a PACKET socket. This is fairly simple. We immediately go 3002 * to 'closed' state and remove our protocol entry in the device list. 3003 */ 3004 3005 static int packet_release(struct socket *sock) 3006 { 3007 struct sock *sk = sock->sk; 3008 struct packet_sock *po; 3009 struct packet_fanout *f; 3010 struct net *net; 3011 union tpacket_req_u req_u; 3012 3013 if (!sk) 3014 return 0; 3015 3016 net = sock_net(sk); 3017 po = pkt_sk(sk); 3018 3019 mutex_lock(&net->packet.sklist_lock); 3020 sk_del_node_init_rcu(sk); 3021 mutex_unlock(&net->packet.sklist_lock); 3022 3023 preempt_disable(); 3024 sock_prot_inuse_add(net, sk->sk_prot, -1); 3025 preempt_enable(); 3026 3027 spin_lock(&po->bind_lock); 3028 unregister_prot_hook(sk, false); 3029 packet_cached_dev_reset(po); 3030 3031 if (po->prot_hook.dev) { 3032 dev_put(po->prot_hook.dev); 3033 po->prot_hook.dev = NULL; 3034 } 3035 spin_unlock(&po->bind_lock); 3036 3037 packet_flush_mclist(sk); 3038 3039 lock_sock(sk); 3040 if (po->rx_ring.pg_vec) { 3041 memset(&req_u, 0, sizeof(req_u)); 3042 packet_set_ring(sk, &req_u, 1, 0); 3043 } 3044 3045 if (po->tx_ring.pg_vec) { 3046 memset(&req_u, 0, sizeof(req_u)); 3047 packet_set_ring(sk, &req_u, 1, 1); 3048 } 3049 release_sock(sk); 3050 3051 f = fanout_release(sk); 3052 3053 synchronize_net(); 3054 3055 kfree(po->rollover); 3056 if (f) { 3057 fanout_release_data(f); 3058 kfree(f); 3059 } 3060 /* 3061 * Now the socket is dead. No more input will appear. 3062 */ 3063 sock_orphan(sk); 3064 sock->sk = NULL; 3065 3066 /* Purge queues */ 3067 3068 skb_queue_purge(&sk->sk_receive_queue); 3069 packet_free_pending(po); 3070 sk_refcnt_debug_release(sk); 3071 3072 sock_put(sk); 3073 return 0; 3074 } 3075 3076 /* 3077 * Attach a packet hook. 
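 *
 * Rebinding takes bind_lock and, if the socket is running, unregisters
 * the old prot_hook before installing the new device/protocol pair, so
 * packet_rcv() never observes a half-updated socket.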
3078 */ 3079 3080 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3081 __be16 proto) 3082 { 3083 struct packet_sock *po = pkt_sk(sk); 3084 struct net_device *dev_curr; 3085 __be16 proto_curr; 3086 bool need_rehook; 3087 struct net_device *dev = NULL; 3088 int ret = 0; 3089 bool unlisted = false; 3090 3091 lock_sock(sk); 3092 spin_lock(&po->bind_lock); 3093 rcu_read_lock(); 3094 3095 if (po->fanout) { 3096 ret = -EINVAL; 3097 goto out_unlock; 3098 } 3099 3100 if (name) { 3101 dev = dev_get_by_name_rcu(sock_net(sk), name); 3102 if (!dev) { 3103 ret = -ENODEV; 3104 goto out_unlock; 3105 } 3106 } else if (ifindex) { 3107 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3108 if (!dev) { 3109 ret = -ENODEV; 3110 goto out_unlock; 3111 } 3112 } 3113 3114 if (dev) 3115 dev_hold(dev); 3116 3117 proto_curr = po->prot_hook.type; 3118 dev_curr = po->prot_hook.dev; 3119 3120 need_rehook = proto_curr != proto || dev_curr != dev; 3121 3122 if (need_rehook) { 3123 if (po->running) { 3124 rcu_read_unlock(); 3125 /* prevents packet_notifier() from calling 3126 * register_prot_hook() 3127 */ 3128 po->num = 0; 3129 __unregister_prot_hook(sk, true); 3130 rcu_read_lock(); 3131 dev_curr = po->prot_hook.dev; 3132 if (dev) 3133 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3134 dev->ifindex); 3135 } 3136 3137 BUG_ON(po->running); 3138 po->num = proto; 3139 po->prot_hook.type = proto; 3140 3141 if (unlikely(unlisted)) { 3142 dev_put(dev); 3143 po->prot_hook.dev = NULL; 3144 po->ifindex = -1; 3145 packet_cached_dev_reset(po); 3146 } else { 3147 po->prot_hook.dev = dev; 3148 po->ifindex = dev ? dev->ifindex : 0; 3149 packet_cached_dev_assign(po, dev); 3150 } 3151 } 3152 if (dev_curr) 3153 dev_put(dev_curr); 3154 3155 if (proto == 0 || !need_rehook) 3156 goto out_unlock; 3157 3158 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3159 register_prot_hook(sk); 3160 } else { 3161 sk->sk_err = ENETDOWN; 3162 if (!sock_flag(sk, SOCK_DEAD)) 3163 sk->sk_error_report(sk); 3164 } 3165 3166 out_unlock: 3167 rcu_read_unlock(); 3168 spin_unlock(&po->bind_lock); 3169 release_sock(sk); 3170 return ret; 3171 } 3172 3173 /* 3174 * Bind a packet socket to a device 3175 */ 3176 3177 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3178 int addr_len) 3179 { 3180 struct sock *sk = sock->sk; 3181 char name[sizeof(uaddr->sa_data) + 1]; 3182 3183 /* 3184 * Check legality 3185 */ 3186 3187 if (addr_len != sizeof(struct sockaddr)) 3188 return -EINVAL; 3189 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3190 * zero-terminated. 3191 */ 3192 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); 3193 name[sizeof(uaddr->sa_data)] = 0; 3194 3195 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3196 } 3197 3198 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3199 { 3200 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3201 struct sock *sk = sock->sk; 3202 3203 /* 3204 * Check legality 3205 */ 3206 3207 if (addr_len < sizeof(struct sockaddr_ll)) 3208 return -EINVAL; 3209 if (sll->sll_family != AF_PACKET) 3210 return -EINVAL; 3211 3212 return packet_do_bind(sk, NULL, sll->sll_ifindex, 3213 sll->sll_protocol ? : pkt_sk(sk)->num); 3214 } 3215 3216 static struct proto packet_proto = { 3217 .name = "PACKET", 3218 .owner = THIS_MODULE, 3219 .obj_size = sizeof(struct packet_sock), 3220 }; 3221 3222 /* 3223 * Create a packet of type SOCK_PACKET. 
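 * (Despite the comment, this handles SOCK_RAW and SOCK_DGRAM too;
 * see the sock->type check below.)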
3224 */ 3225 3226 static int packet_create(struct net *net, struct socket *sock, int protocol, 3227 int kern) 3228 { 3229 struct sock *sk; 3230 struct packet_sock *po; 3231 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3232 int err; 3233 3234 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3235 return -EPERM; 3236 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3237 sock->type != SOCK_PACKET) 3238 return -ESOCKTNOSUPPORT; 3239 3240 sock->state = SS_UNCONNECTED; 3241 3242 err = -ENOBUFS; 3243 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3244 if (sk == NULL) 3245 goto out; 3246 3247 sock->ops = &packet_ops; 3248 if (sock->type == SOCK_PACKET) 3249 sock->ops = &packet_ops_spkt; 3250 3251 sock_init_data(sock, sk); 3252 3253 po = pkt_sk(sk); 3254 init_completion(&po->skb_completion); 3255 sk->sk_family = PF_PACKET; 3256 po->num = proto; 3257 po->xmit = dev_queue_xmit; 3258 3259 err = packet_alloc_pending(po); 3260 if (err) 3261 goto out2; 3262 3263 packet_cached_dev_reset(po); 3264 3265 sk->sk_destruct = packet_sock_destruct; 3266 sk_refcnt_debug_inc(sk); 3267 3268 /* 3269 * Attach a protocol block 3270 */ 3271 3272 spin_lock_init(&po->bind_lock); 3273 mutex_init(&po->pg_vec_lock); 3274 po->rollover = NULL; 3275 po->prot_hook.func = packet_rcv; 3276 3277 if (sock->type == SOCK_PACKET) 3278 po->prot_hook.func = packet_rcv_spkt; 3279 3280 po->prot_hook.af_packet_priv = sk; 3281 3282 if (proto) { 3283 po->prot_hook.type = proto; 3284 __register_prot_hook(sk); 3285 } 3286 3287 mutex_lock(&net->packet.sklist_lock); 3288 sk_add_node_tail_rcu(sk, &net->packet.sklist); 3289 mutex_unlock(&net->packet.sklist_lock); 3290 3291 preempt_disable(); 3292 sock_prot_inuse_add(net, &packet_proto, 1); 3293 preempt_enable(); 3294 3295 return 0; 3296 out2: 3297 sk_free(sk); 3298 out: 3299 return err; 3300 } 3301 3302 /* 3303 * Pull a packet from our receive queue and hand it to the user. 3304 * If necessary we block. 3305 */ 3306 3307 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3308 int flags) 3309 { 3310 struct sock *sk = sock->sk; 3311 struct sk_buff *skb; 3312 int copied, err; 3313 int vnet_hdr_len = 0; 3314 unsigned int origlen = 0; 3315 3316 err = -EINVAL; 3317 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3318 goto out; 3319 3320 #if 0 3321 /* What error should we return now? EUNATTACH? */ 3322 if (pkt_sk(sk)->ifindex < 0) 3323 return -ENODEV; 3324 #endif 3325 3326 if (flags & MSG_ERRQUEUE) { 3327 err = sock_recv_errqueue(sk, msg, len, 3328 SOL_PACKET, PACKET_TX_TIMESTAMP); 3329 goto out; 3330 } 3331 3332 /* 3333 * Call the generic datagram receiver. This handles all sorts 3334 * of horrible races and re-entrancy so we can forget about it 3335 * in the protocol layers. 3336 * 3337 * Now it will return ENETDOWN, if the device has just gone down, 3338 * but then it will block. 3339 */ 3340 3341 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); 3342 3343 /* 3344 * An error occurred so return it. Because skb_recv_datagram() 3345 * handles the blocking we don't have to see or worry about 3346 * blocking retries. 3347 */ 3348 3349 if (skb == NULL) 3350 goto out; 3351 3352 packet_rcv_try_clear_pressure(pkt_sk(sk)); 3353 3354 if (pkt_sk(sk)->has_vnet_hdr) { 3355 err = packet_rcv_vnet(msg, skb, &len); 3356 if (err) 3357 goto out_free; 3358 vnet_hdr_len = sizeof(struct virtio_net_hdr); 3359 } 3360 3361 /* You lose any data beyond the buffer you gave.
If it worries 3362 * a user program they can ask the device for its MTU 3363 * anyway. 3364 */ 3365 copied = skb->len; 3366 if (copied > len) { 3367 copied = len; 3368 msg->msg_flags |= MSG_TRUNC; 3369 } 3370 3371 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3372 if (err) 3373 goto out_free; 3374 3375 if (sock->type != SOCK_PACKET) { 3376 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3377 3378 /* Original length was stored in sockaddr_ll fields */ 3379 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3380 sll->sll_family = AF_PACKET; 3381 sll->sll_protocol = skb->protocol; 3382 } 3383 3384 sock_recv_ts_and_drops(msg, sk, skb); 3385 3386 if (msg->msg_name) { 3387 int copy_len; 3388 3389 /* If the address length field is there to be filled 3390 * in, we fill it in now. 3391 */ 3392 if (sock->type == SOCK_PACKET) { 3393 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3394 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3395 copy_len = msg->msg_namelen; 3396 } else { 3397 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3398 3399 msg->msg_namelen = sll->sll_halen + 3400 offsetof(struct sockaddr_ll, sll_addr); 3401 copy_len = msg->msg_namelen; 3402 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { 3403 memset(msg->msg_name + 3404 offsetof(struct sockaddr_ll, sll_addr), 3405 0, sizeof(sll->sll_addr)); 3406 msg->msg_namelen = sizeof(struct sockaddr_ll); 3407 } 3408 } 3409 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); 3410 } 3411 3412 if (pkt_sk(sk)->auxdata) { 3413 struct tpacket_auxdata aux; 3414 3415 aux.tp_status = TP_STATUS_USER; 3416 if (skb->ip_summed == CHECKSUM_PARTIAL) 3417 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3418 else if (skb->pkt_type != PACKET_OUTGOING && 3419 (skb->ip_summed == CHECKSUM_COMPLETE || 3420 skb_csum_unnecessary(skb))) 3421 aux.tp_status |= TP_STATUS_CSUM_VALID; 3422 3423 aux.tp_len = origlen; 3424 aux.tp_snaplen = skb->len; 3425 aux.tp_mac = 0; 3426 aux.tp_net = skb_network_offset(skb); 3427 if (skb_vlan_tag_present(skb)) { 3428 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3429 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3430 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3431 } else { 3432 aux.tp_vlan_tci = 0; 3433 aux.tp_vlan_tpid = 0; 3434 } 3435 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3436 } 3437 3438 /* 3439 * Free or return the buffer as appropriate. Again this 3440 * hides all the races and re-entrancy issues from us. 3441 */ 3442 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3443 3444 out_free: 3445 skb_free_datagram(sk, skb); 3446 out: 3447 return err; 3448 } 3449 3450 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3451 int peer) 3452 { 3453 struct net_device *dev; 3454 struct sock *sk = sock->sk; 3455 3456 if (peer) 3457 return -EOPNOTSUPP; 3458 3459 uaddr->sa_family = AF_PACKET; 3460 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3461 rcu_read_lock(); 3462 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 3463 if (dev) 3464 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3465 rcu_read_unlock(); 3466 3467 return sizeof(*uaddr); 3468 } 3469 3470 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3471 int peer) 3472 { 3473 struct net_device *dev; 3474 struct sock *sk = sock->sk; 3475 struct packet_sock *po = pkt_sk(sk); 3476 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3477 3478 if (peer) 3479 return -EOPNOTSUPP; 3480 3481 sll->sll_family = AF_PACKET; 3482 sll->sll_ifindex = po->ifindex; 3483 sll->sll_protocol = po->num; 3484 sll->sll_pkttype = 0; 3485 rcu_read_lock(); 3486 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 3487 if (dev) { 3488 sll->sll_hatype = dev->type; 3489 sll->sll_halen = dev->addr_len; 3490 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); 3491 } else { 3492 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3493 sll->sll_halen = 0; 3494 } 3495 rcu_read_unlock(); 3496 3497 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3498 } 3499 3500 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3501 int what) 3502 { 3503 switch (i->type) { 3504 case PACKET_MR_MULTICAST: 3505 if (i->alen != dev->addr_len) 3506 return -EINVAL; 3507 if (what > 0) 3508 return dev_mc_add(dev, i->addr); 3509 else 3510 return dev_mc_del(dev, i->addr); 3511 break; 3512 case PACKET_MR_PROMISC: 3513 return dev_set_promiscuity(dev, what); 3514 case PACKET_MR_ALLMULTI: 3515 return dev_set_allmulti(dev, what); 3516 case PACKET_MR_UNICAST: 3517 if (i->alen != dev->addr_len) 3518 return -EINVAL; 3519 if (what > 0) 3520 return dev_uc_add(dev, i->addr); 3521 else 3522 return dev_uc_del(dev, i->addr); 3523 break; 3524 default: 3525 break; 3526 } 3527 return 0; 3528 } 3529 3530 static void packet_dev_mclist_delete(struct net_device *dev, 3531 struct packet_mclist **mlp) 3532 { 3533 struct packet_mclist *ml; 3534 3535 while ((ml = *mlp) != NULL) { 3536 if (ml->ifindex == dev->ifindex) { 3537 packet_dev_mc(dev, ml, -1); 3538 *mlp = ml->next; 3539 kfree(ml); 3540 } else 3541 mlp = &ml->next; 3542 } 3543 } 3544 3545 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3546 { 3547 struct packet_sock *po = pkt_sk(sk); 3548 struct packet_mclist *ml, *i; 3549 struct net_device *dev; 3550 int err; 3551 3552 rtnl_lock(); 3553 3554 err = -ENODEV; 3555 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3556 if (!dev) 3557 goto done; 3558 3559 err = -EINVAL; 3560 if (mreq->mr_alen > dev->addr_len) 3561 goto done; 3562 3563 err = -ENOBUFS; 3564 i = kmalloc(sizeof(*i), GFP_KERNEL); 3565 if (i == NULL) 3566 goto done; 3567 3568 err = 0; 3569 for (ml = po->mclist; ml; ml = ml->next) { 3570 if (ml->ifindex == mreq->mr_ifindex && 3571 ml->type == mreq->mr_type && 3572 ml->alen == mreq->mr_alen && 3573 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3574 ml->count++; 3575 /* Free the new element ... 
*/ 3576 kfree(i); 3577 goto done; 3578 } 3579 } 3580 3581 i->type = mreq->mr_type; 3582 i->ifindex = mreq->mr_ifindex; 3583 i->alen = mreq->mr_alen; 3584 memcpy(i->addr, mreq->mr_address, i->alen); 3585 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3586 i->count = 1; 3587 i->next = po->mclist; 3588 po->mclist = i; 3589 err = packet_dev_mc(dev, i, 1); 3590 if (err) { 3591 po->mclist = i->next; 3592 kfree(i); 3593 } 3594 3595 done: 3596 rtnl_unlock(); 3597 return err; 3598 } 3599 3600 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3601 { 3602 struct packet_mclist *ml, **mlp; 3603 3604 rtnl_lock(); 3605 3606 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3607 if (ml->ifindex == mreq->mr_ifindex && 3608 ml->type == mreq->mr_type && 3609 ml->alen == mreq->mr_alen && 3610 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3611 if (--ml->count == 0) { 3612 struct net_device *dev; 3613 *mlp = ml->next; 3614 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3615 if (dev) 3616 packet_dev_mc(dev, ml, -1); 3617 kfree(ml); 3618 } 3619 break; 3620 } 3621 } 3622 rtnl_unlock(); 3623 return 0; 3624 } 3625 3626 static void packet_flush_mclist(struct sock *sk) 3627 { 3628 struct packet_sock *po = pkt_sk(sk); 3629 struct packet_mclist *ml; 3630 3631 if (!po->mclist) 3632 return; 3633 3634 rtnl_lock(); 3635 while ((ml = po->mclist) != NULL) { 3636 struct net_device *dev; 3637 3638 po->mclist = ml->next; 3639 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3640 if (dev != NULL) 3641 packet_dev_mc(dev, ml, -1); 3642 kfree(ml); 3643 } 3644 rtnl_unlock(); 3645 } 3646 3647 static int 3648 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 3649 { 3650 struct sock *sk = sock->sk; 3651 struct packet_sock *po = pkt_sk(sk); 3652 int ret; 3653 3654 if (level != SOL_PACKET) 3655 return -ENOPROTOOPT; 3656 3657 switch (optname) { 3658 case PACKET_ADD_MEMBERSHIP: 3659 case PACKET_DROP_MEMBERSHIP: 3660 { 3661 struct packet_mreq_max mreq; 3662 int len = optlen; 3663 memset(&mreq, 0, sizeof(mreq)); 3664 if (len < sizeof(struct packet_mreq)) 3665 return -EINVAL; 3666 if (len > sizeof(mreq)) 3667 len = sizeof(mreq); 3668 if (copy_from_user(&mreq, optval, len)) 3669 return -EFAULT; 3670 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3671 return -EINVAL; 3672 if (optname == PACKET_ADD_MEMBERSHIP) 3673 ret = packet_mc_add(sk, &mreq); 3674 else 3675 ret = packet_mc_drop(sk, &mreq); 3676 return ret; 3677 } 3678 3679 case PACKET_RX_RING: 3680 case PACKET_TX_RING: 3681 { 3682 union tpacket_req_u req_u; 3683 int len; 3684 3685 lock_sock(sk); 3686 switch (po->tp_version) { 3687 case TPACKET_V1: 3688 case TPACKET_V2: 3689 len = sizeof(req_u.req); 3690 break; 3691 case TPACKET_V3: 3692 default: 3693 len = sizeof(req_u.req3); 3694 break; 3695 } 3696 if (optlen < len) { 3697 ret = -EINVAL; 3698 } else { 3699 if (copy_from_user(&req_u.req, optval, len)) 3700 ret = -EFAULT; 3701 else 3702 ret = packet_set_ring(sk, &req_u, 0, 3703 optname == PACKET_TX_RING); 3704 } 3705 release_sock(sk); 3706 return ret; 3707 } 3708 case PACKET_COPY_THRESH: 3709 { 3710 int val; 3711 3712 if (optlen != sizeof(val)) 3713 return -EINVAL; 3714 if (copy_from_user(&val, optval, sizeof(val))) 3715 return -EFAULT; 3716 3717 pkt_sk(sk)->copy_thresh = val; 3718 return 0; 3719 } 3720 case PACKET_VERSION: 3721 { 3722 int val; 3723 3724 if (optlen != sizeof(val)) 3725 return -EINVAL; 3726 if (copy_from_user(&val, optval, 
sizeof(val))) 3727 return -EFAULT; 3728 switch (val) { 3729 case TPACKET_V1: 3730 case TPACKET_V2: 3731 case TPACKET_V3: 3732 break; 3733 default: 3734 return -EINVAL; 3735 } 3736 lock_sock(sk); 3737 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3738 ret = -EBUSY; 3739 } else { 3740 po->tp_version = val; 3741 ret = 0; 3742 } 3743 release_sock(sk); 3744 return ret; 3745 } 3746 case PACKET_RESERVE: 3747 { 3748 unsigned int val; 3749 3750 if (optlen != sizeof(val)) 3751 return -EINVAL; 3752 if (copy_from_user(&val, optval, sizeof(val))) 3753 return -EFAULT; 3754 if (val > INT_MAX) 3755 return -EINVAL; 3756 lock_sock(sk); 3757 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3758 ret = -EBUSY; 3759 } else { 3760 po->tp_reserve = val; 3761 ret = 0; 3762 } 3763 release_sock(sk); 3764 return ret; 3765 } 3766 case PACKET_LOSS: 3767 { 3768 unsigned int val; 3769 3770 if (optlen != sizeof(val)) 3771 return -EINVAL; 3772 if (copy_from_user(&val, optval, sizeof(val))) 3773 return -EFAULT; 3774 3775 lock_sock(sk); 3776 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3777 ret = -EBUSY; 3778 } else { 3779 po->tp_loss = !!val; 3780 ret = 0; 3781 } 3782 release_sock(sk); 3783 return ret; 3784 } 3785 case PACKET_AUXDATA: 3786 { 3787 int val; 3788 3789 if (optlen < sizeof(val)) 3790 return -EINVAL; 3791 if (copy_from_user(&val, optval, sizeof(val))) 3792 return -EFAULT; 3793 3794 lock_sock(sk); 3795 po->auxdata = !!val; 3796 release_sock(sk); 3797 return 0; 3798 } 3799 case PACKET_ORIGDEV: 3800 { 3801 int val; 3802 3803 if (optlen < sizeof(val)) 3804 return -EINVAL; 3805 if (copy_from_user(&val, optval, sizeof(val))) 3806 return -EFAULT; 3807 3808 lock_sock(sk); 3809 po->origdev = !!val; 3810 release_sock(sk); 3811 return 0; 3812 } 3813 case PACKET_VNET_HDR: 3814 { 3815 int val; 3816 3817 if (sock->type != SOCK_RAW) 3818 return -EINVAL; 3819 if (optlen < sizeof(val)) 3820 return -EINVAL; 3821 if (copy_from_user(&val, optval, sizeof(val))) 3822 return -EFAULT; 3823 3824 lock_sock(sk); 3825 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3826 ret = -EBUSY; 3827 } else { 3828 po->has_vnet_hdr = !!val; 3829 ret = 0; 3830 } 3831 release_sock(sk); 3832 return ret; 3833 } 3834 case PACKET_TIMESTAMP: 3835 { 3836 int val; 3837 3838 if (optlen != sizeof(val)) 3839 return -EINVAL; 3840 if (copy_from_user(&val, optval, sizeof(val))) 3841 return -EFAULT; 3842 3843 po->tp_tstamp = val; 3844 return 0; 3845 } 3846 case PACKET_FANOUT: 3847 { 3848 int val; 3849 3850 if (optlen != sizeof(val)) 3851 return -EINVAL; 3852 if (copy_from_user(&val, optval, sizeof(val))) 3853 return -EFAULT; 3854 3855 return fanout_add(sk, val & 0xffff, val >> 16); 3856 } 3857 case PACKET_FANOUT_DATA: 3858 { 3859 if (!po->fanout) 3860 return -EINVAL; 3861 3862 return fanout_set_data(po, optval, optlen); 3863 } 3864 case PACKET_IGNORE_OUTGOING: 3865 { 3866 int val; 3867 3868 if (optlen != sizeof(val)) 3869 return -EINVAL; 3870 if (copy_from_user(&val, optval, sizeof(val))) 3871 return -EFAULT; 3872 if (val < 0 || val > 1) 3873 return -EINVAL; 3874 3875 po->prot_hook.ignore_outgoing = !!val; 3876 return 0; 3877 } 3878 case PACKET_TX_HAS_OFF: 3879 { 3880 unsigned int val; 3881 3882 if (optlen != sizeof(val)) 3883 return -EINVAL; 3884 if (copy_from_user(&val, optval, sizeof(val))) 3885 return -EFAULT; 3886 3887 lock_sock(sk); 3888 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3889 ret = -EBUSY; 3890 } else { 3891 po->tp_tx_has_off = !!val; 3892 ret = 0; 3893 } 3894 release_sock(sk); 3895 return ret; 3896 } 3897 case PACKET_QDISC_BYPASS: 3898 { 3899 int
val; 3900 3901 if (optlen != sizeof(val)) 3902 return -EINVAL; 3903 if (copy_from_user(&val, optval, sizeof(val))) 3904 return -EFAULT; 3905 3906 po->xmit = val ? packet_direct_xmit : dev_queue_xmit; 3907 return 0; 3908 } 3909 default: 3910 return -ENOPROTOOPT; 3911 } 3912 } 3913 3914 static int packet_getsockopt(struct socket *sock, int level, int optname, 3915 char __user *optval, int __user *optlen) 3916 { 3917 int len; 3918 int val, lv = sizeof(val); 3919 struct sock *sk = sock->sk; 3920 struct packet_sock *po = pkt_sk(sk); 3921 void *data = &val; 3922 union tpacket_stats_u st; 3923 struct tpacket_rollover_stats rstats; 3924 int drops; 3925 3926 if (level != SOL_PACKET) 3927 return -ENOPROTOOPT; 3928 3929 if (get_user(len, optlen)) 3930 return -EFAULT; 3931 3932 if (len < 0) 3933 return -EINVAL; 3934 3935 switch (optname) { 3936 case PACKET_STATISTICS: 3937 spin_lock_bh(&sk->sk_receive_queue.lock); 3938 memcpy(&st, &po->stats, sizeof(st)); 3939 memset(&po->stats, 0, sizeof(po->stats)); 3940 spin_unlock_bh(&sk->sk_receive_queue.lock); 3941 drops = atomic_xchg(&po->tp_drops, 0); 3942 3943 if (po->tp_version == TPACKET_V3) { 3944 lv = sizeof(struct tpacket_stats_v3); 3945 st.stats3.tp_drops = drops; 3946 st.stats3.tp_packets += drops; 3947 data = &st.stats3; 3948 } else { 3949 lv = sizeof(struct tpacket_stats); 3950 st.stats1.tp_drops = drops; 3951 st.stats1.tp_packets += drops; 3952 data = &st.stats1; 3953 } 3954 3955 break; 3956 case PACKET_AUXDATA: 3957 val = po->auxdata; 3958 break; 3959 case PACKET_ORIGDEV: 3960 val = po->origdev; 3961 break; 3962 case PACKET_VNET_HDR: 3963 val = po->has_vnet_hdr; 3964 break; 3965 case PACKET_VERSION: 3966 val = po->tp_version; 3967 break; 3968 case PACKET_HDRLEN: 3969 if (len > sizeof(int)) 3970 len = sizeof(int); 3971 if (len < sizeof(int)) 3972 return -EINVAL; 3973 if (copy_from_user(&val, optval, len)) 3974 return -EFAULT; 3975 switch (val) { 3976 case TPACKET_V1: 3977 val = sizeof(struct tpacket_hdr); 3978 break; 3979 case TPACKET_V2: 3980 val = sizeof(struct tpacket2_hdr); 3981 break; 3982 case TPACKET_V3: 3983 val = sizeof(struct tpacket3_hdr); 3984 break; 3985 default: 3986 return -EINVAL; 3987 } 3988 break; 3989 case PACKET_RESERVE: 3990 val = po->tp_reserve; 3991 break; 3992 case PACKET_LOSS: 3993 val = po->tp_loss; 3994 break; 3995 case PACKET_TIMESTAMP: 3996 val = po->tp_tstamp; 3997 break; 3998 case PACKET_FANOUT: 3999 val = (po->fanout ? 
4000 ((u32)po->fanout->id | 4001 ((u32)po->fanout->type << 16) | 4002 ((u32)po->fanout->flags << 24)) : 4003 0); 4004 break; 4005 case PACKET_IGNORE_OUTGOING: 4006 val = po->prot_hook.ignore_outgoing; 4007 break; 4008 case PACKET_ROLLOVER_STATS: 4009 if (!po->rollover) 4010 return -EINVAL; 4011 rstats.tp_all = atomic_long_read(&po->rollover->num); 4012 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 4013 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 4014 data = &rstats; 4015 lv = sizeof(rstats); 4016 break; 4017 case PACKET_TX_HAS_OFF: 4018 val = po->tp_tx_has_off; 4019 break; 4020 case PACKET_QDISC_BYPASS: 4021 val = packet_use_direct_xmit(po); 4022 break; 4023 default: 4024 return -ENOPROTOOPT; 4025 } 4026 4027 if (len > lv) 4028 len = lv; 4029 if (put_user(len, optlen)) 4030 return -EFAULT; 4031 if (copy_to_user(optval, data, len)) 4032 return -EFAULT; 4033 return 0; 4034 } 4035 4036 4037 #ifdef CONFIG_COMPAT 4038 static int compat_packet_setsockopt(struct socket *sock, int level, int optname, 4039 char __user *optval, unsigned int optlen) 4040 { 4041 struct packet_sock *po = pkt_sk(sock->sk); 4042 4043 if (level != SOL_PACKET) 4044 return -ENOPROTOOPT; 4045 4046 if (optname == PACKET_FANOUT_DATA && 4047 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { 4048 optval = (char __user *)get_compat_bpf_fprog(optval); 4049 if (!optval) 4050 return -EFAULT; 4051 optlen = sizeof(struct sock_fprog); 4052 } 4053 4054 return packet_setsockopt(sock, level, optname, optval, optlen); 4055 } 4056 #endif 4057 4058 static int packet_notifier(struct notifier_block *this, 4059 unsigned long msg, void *ptr) 4060 { 4061 struct sock *sk; 4062 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4063 struct net *net = dev_net(dev); 4064 4065 rcu_read_lock(); 4066 sk_for_each_rcu(sk, &net->packet.sklist) { 4067 struct packet_sock *po = pkt_sk(sk); 4068 4069 switch (msg) { 4070 case NETDEV_UNREGISTER: 4071 if (po->mclist) 4072 packet_dev_mclist_delete(dev, &po->mclist); 4073 /* fallthrough */ 4074 4075 case NETDEV_DOWN: 4076 if (dev->ifindex == po->ifindex) { 4077 spin_lock(&po->bind_lock); 4078 if (po->running) { 4079 __unregister_prot_hook(sk, false); 4080 sk->sk_err = ENETDOWN; 4081 if (!sock_flag(sk, SOCK_DEAD)) 4082 sk->sk_error_report(sk); 4083 } 4084 if (msg == NETDEV_UNREGISTER) { 4085 packet_cached_dev_reset(po); 4086 po->ifindex = -1; 4087 if (po->prot_hook.dev) 4088 dev_put(po->prot_hook.dev); 4089 po->prot_hook.dev = NULL; 4090 } 4091 spin_unlock(&po->bind_lock); 4092 } 4093 break; 4094 case NETDEV_UP: 4095 if (dev->ifindex == po->ifindex) { 4096 spin_lock(&po->bind_lock); 4097 if (po->num) 4098 register_prot_hook(sk); 4099 spin_unlock(&po->bind_lock); 4100 } 4101 break; 4102 } 4103 } 4104 rcu_read_unlock(); 4105 return NOTIFY_DONE; 4106 } 4107 4108 4109 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4110 unsigned long arg) 4111 { 4112 struct sock *sk = sock->sk; 4113 4114 switch (cmd) { 4115 case SIOCOUTQ: 4116 { 4117 int amount = sk_wmem_alloc_get(sk); 4118 4119 return put_user(amount, (int __user *)arg); 4120 } 4121 case SIOCINQ: 4122 { 4123 struct sk_buff *skb; 4124 int amount = 0; 4125 4126 spin_lock_bh(&sk->sk_receive_queue.lock); 4127 skb = skb_peek(&sk->sk_receive_queue); 4128 if (skb) 4129 amount = skb->len; 4130 spin_unlock_bh(&sk->sk_receive_queue.lock); 4131 return put_user(amount, (int __user *)arg); 4132 } 4133 #ifdef CONFIG_INET 4134 case SIOCADDRT: 4135 case SIOCDELRT: 4136 case SIOCDARP: 4137 case SIOCGARP: 4138 case SIOCSARP: 
4139 case SIOCGIFADDR: 4140 case SIOCSIFADDR: 4141 case SIOCGIFBRDADDR: 4142 case SIOCSIFBRDADDR: 4143 case SIOCGIFNETMASK: 4144 case SIOCSIFNETMASK: 4145 case SIOCGIFDSTADDR: 4146 case SIOCSIFDSTADDR: 4147 case SIOCSIFFLAGS: 4148 return inet_dgram_ops.ioctl(sock, cmd, arg); 4149 #endif 4150 4151 default: 4152 return -ENOIOCTLCMD; 4153 } 4154 return 0; 4155 } 4156 4157 static __poll_t packet_poll(struct file *file, struct socket *sock, 4158 poll_table *wait) 4159 { 4160 struct sock *sk = sock->sk; 4161 struct packet_sock *po = pkt_sk(sk); 4162 __poll_t mask = datagram_poll(file, sock, wait); 4163 4164 spin_lock_bh(&sk->sk_receive_queue.lock); 4165 if (po->rx_ring.pg_vec) { 4166 if (!packet_previous_rx_frame(po, &po->rx_ring, 4167 TP_STATUS_KERNEL)) 4168 mask |= EPOLLIN | EPOLLRDNORM; 4169 } 4170 packet_rcv_try_clear_pressure(po); 4171 spin_unlock_bh(&sk->sk_receive_queue.lock); 4172 spin_lock_bh(&sk->sk_write_queue.lock); 4173 if (po->tx_ring.pg_vec) { 4174 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 4175 mask |= EPOLLOUT | EPOLLWRNORM; 4176 } 4177 spin_unlock_bh(&sk->sk_write_queue.lock); 4178 return mask; 4179 } 4180 4181 4182 /* Dirty? Well, I still haven't found a better way to account 4183 * for user mmaps. 4184 */ 4185 4186 static void packet_mm_open(struct vm_area_struct *vma) 4187 { 4188 struct file *file = vma->vm_file; 4189 struct socket *sock = file->private_data; 4190 struct sock *sk = sock->sk; 4191 4192 if (sk) 4193 atomic_inc(&pkt_sk(sk)->mapped); 4194 } 4195 4196 static void packet_mm_close(struct vm_area_struct *vma) 4197 { 4198 struct file *file = vma->vm_file; 4199 struct socket *sock = file->private_data; 4200 struct sock *sk = sock->sk; 4201 4202 if (sk) 4203 atomic_dec(&pkt_sk(sk)->mapped); 4204 } 4205 4206 static const struct vm_operations_struct packet_mmap_ops = { 4207 .open = packet_mm_open, 4208 .close = packet_mm_close, 4209 }; 4210 4211 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 4212 unsigned int len) 4213 { 4214 int i; 4215 4216 for (i = 0; i < len; i++) { 4217 if (likely(pg_vec[i].buffer)) { 4218 if (is_vmalloc_addr(pg_vec[i].buffer)) 4219 vfree(pg_vec[i].buffer); 4220 else 4221 free_pages((unsigned long)pg_vec[i].buffer, 4222 order); 4223 pg_vec[i].buffer = NULL; 4224 } 4225 } 4226 kfree(pg_vec); 4227 } 4228 4229 static char *alloc_one_pg_vec_page(unsigned long order) 4230 { 4231 char *buffer; 4232 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 4233 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 4234 4235 buffer = (char *) __get_free_pages(gfp_flags, order); 4236 if (buffer) 4237 return buffer; 4238 4239 /* __get_free_pages failed, fall back to vmalloc */ 4240 buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); 4241 if (buffer) 4242 return buffer; 4243 4244 /* vmalloc failed, let's dig into swap here */ 4245 gfp_flags &= ~__GFP_NORETRY; 4246 buffer = (char *) __get_free_pages(gfp_flags, order); 4247 if (buffer) 4248 return buffer; 4249 4250 /* complete and utter failure */ 4251 return NULL; 4252 } 4253 4254 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 4255 { 4256 unsigned int block_nr = req->tp_block_nr; 4257 struct pgv *pg_vec; 4258 int i; 4259 4260 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN); 4261 if (unlikely(!pg_vec)) 4262 goto out; 4263 4264 for (i = 0; i < block_nr; i++) { 4265 pg_vec[i].buffer = alloc_one_pg_vec_page(order); 4266 if (unlikely(!pg_vec[i].buffer)) 4267 goto out_free_pgvec; 4268 } 4269 4270 out: 4271 return pg_vec; 4272 4273 out_free_pgvec: 4274
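	/* a partially filled vector is fine here: free_pg_vec() skips NULL buffers */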
free_pg_vec(pg_vec, order, block_nr); 4275 pg_vec = NULL; 4276 goto out; 4277 } 4278 4279 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4280 int closing, int tx_ring) 4281 { 4282 struct pgv *pg_vec = NULL; 4283 struct packet_sock *po = pkt_sk(sk); 4284 int was_running, order = 0; 4285 struct packet_ring_buffer *rb; 4286 struct sk_buff_head *rb_queue; 4287 __be16 num; 4288 int err = -EINVAL; 4289 /* Added to avoid minimal code churn */ 4290 struct tpacket_req *req = &req_u->req; 4291 4292 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4293 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4294 4295 err = -EBUSY; 4296 if (!closing) { 4297 if (atomic_read(&po->mapped)) 4298 goto out; 4299 if (packet_read_pending(rb)) 4300 goto out; 4301 } 4302 4303 if (req->tp_block_nr) { 4304 unsigned int min_frame_size; 4305 4306 /* Sanity tests and some calculations */ 4307 err = -EBUSY; 4308 if (unlikely(rb->pg_vec)) 4309 goto out; 4310 4311 switch (po->tp_version) { 4312 case TPACKET_V1: 4313 po->tp_hdrlen = TPACKET_HDRLEN; 4314 break; 4315 case TPACKET_V2: 4316 po->tp_hdrlen = TPACKET2_HDRLEN; 4317 break; 4318 case TPACKET_V3: 4319 po->tp_hdrlen = TPACKET3_HDRLEN; 4320 break; 4321 } 4322 4323 err = -EINVAL; 4324 if (unlikely((int)req->tp_block_size <= 0)) 4325 goto out; 4326 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4327 goto out; 4328 min_frame_size = po->tp_hdrlen + po->tp_reserve; 4329 if (po->tp_version >= TPACKET_V3 && 4330 req->tp_block_size < 4331 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) 4332 goto out; 4333 if (unlikely(req->tp_frame_size < min_frame_size)) 4334 goto out; 4335 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4336 goto out; 4337 4338 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4339 if (unlikely(rb->frames_per_block == 0)) 4340 goto out; 4341 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) 4342 goto out; 4343 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4344 req->tp_frame_nr)) 4345 goto out; 4346 4347 err = -ENOMEM; 4348 order = get_order(req->tp_block_size); 4349 pg_vec = alloc_pg_vec(req, order); 4350 if (unlikely(!pg_vec)) 4351 goto out; 4352 switch (po->tp_version) { 4353 case TPACKET_V3: 4354 /* Block transmit is not supported yet */ 4355 if (!tx_ring) { 4356 init_prb_bdqc(po, rb, pg_vec, req_u); 4357 } else { 4358 struct tpacket_req3 *req3 = &req_u->req3; 4359 4360 if (req3->tp_retire_blk_tov || 4361 req3->tp_sizeof_priv || 4362 req3->tp_feature_req_word) { 4363 err = -EINVAL; 4364 goto out_free_pg_vec; 4365 } 4366 } 4367 break; 4368 default: 4369 break; 4370 } 4371 } 4372 /* Done */ 4373 else { 4374 err = -EINVAL; 4375 if (unlikely(req->tp_frame_nr)) 4376 goto out; 4377 } 4378 4379 4380 /* Detach socket from network */ 4381 spin_lock(&po->bind_lock); 4382 was_running = po->running; 4383 num = po->num; 4384 if (was_running) { 4385 po->num = 0; 4386 __unregister_prot_hook(sk, false); 4387 } 4388 spin_unlock(&po->bind_lock); 4389 4390 synchronize_net(); 4391 4392 err = -EBUSY; 4393 mutex_lock(&po->pg_vec_lock); 4394 if (closing || atomic_read(&po->mapped) == 0) { 4395 err = 0; 4396 spin_lock_bh(&rb_queue->lock); 4397 swap(rb->pg_vec, pg_vec); 4398 rb->frame_max = (req->tp_frame_nr - 1); 4399 rb->head = 0; 4400 rb->frame_size = req->tp_frame_size; 4401 spin_unlock_bh(&rb_queue->lock); 4402 4403 swap(rb->pg_vec_order, order); 4404 swap(rb->pg_vec_len, req->tp_block_nr); 4405 4406 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 4407 
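	/* switch the rx hook: ring installed => tpacket_rcv(), ring torn down => plain packet_rcv() */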
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
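
/* Illustrative userspace usage (a sketch, not part of this file): the
 * rings configured via packet_set_ring() are exposed through a single
 * mmap() at offset 0 covering RX then TX.  The sizes below are example
 * values; any layout passing the sanity checks above works.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct tpacket_req req = {
 *		.tp_block_size	= 4096,
 *		.tp_block_nr	= 64,
 *		.tp_frame_size	= 2048,
 *		.tp_frame_nr	= 128,	// block_nr * frames_per_block
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */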

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
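
/* Note: MODULE_ALIAS_NETPROTO(PF_PACKET) expands to a "net-pf-17"
 * alias (PF_PACKET == 17), so when this file is built as a module a
 * userspace call such as
 *
 *	socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *
 * makes the kernel request and load it on demand.
 */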