1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * PACKET - implements raw packet sockets.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox, <gw4pts@gw4pts.ampr.org>
12 *
13 * Fixes:
14 * Alan Cox : verify_area() now used correctly
15 * Alan Cox : new skbuff lists, look ma no backlogs!
16 * Alan Cox : tidied skbuff lists.
17 * Alan Cox : Now uses generic datagram routines I
18 * added. Also fixed the peek/read crash
19 * from all old Linux datagram code.
20 * Alan Cox : Uses the improved datagram code.
21 * Alan Cox : Added NULL's for socket options.
22 * Alan Cox : Re-commented the code.
23 * Alan Cox : Use new kernel side addressing
24 * Rob Janssen : Correct MTU usage.
25 * Dave Platt : Counter leaks caused by incorrect
26 * interrupt locking and some slightly
27 * dubious gcc output. Can you read
28 * compiler: it said _VOLATILE_
29 * Richard Kooijman : Timestamp fixes.
30 * Alan Cox : New buffers. Use sk->mac.raw.
31 * Alan Cox : sendmsg/recvmsg support.
32 * Alan Cox : Protocol setting support
33 * Alexey Kuznetsov : Untied from IPv4 stack.
34 * Cyrus Durgin : Fixed kerneld for kmod.
35 * Michal Ostrowski : Module initialization cleanup.
36 * Ulises Alonso : Frame number limit removal and
37 * packet_set_ring memory leak.
38 * Eric Biederman : Allow for > 8 byte hardware addresses.
39 * The convention is that longer addresses
40 * will simply extend the hardware address
41 * byte arrays at the end of sockaddr_ll
42 * and packet_mreq.
43 * Johann Baudy : Added TX RING.
44 * Chetan Loke : Implemented TPACKET_V3 block abstraction
45 * layer.
46 * Copyright (C) 2011, <lokec@ccs.neu.edu>
47 */
48
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
51 #include <linux/ethtool.h>
52 #include <linux/filter.h>
53 #include <linux/types.h>
54 #include <linux/mm.h>
55 #include <linux/capability.h>
56 #include <linux/fcntl.h>
57 #include <linux/socket.h>
58 #include <linux/in.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/if_packet.h>
62 #include <linux/wireless.h>
63 #include <linux/kernel.h>
64 #include <linux/kmod.h>
65 #include <linux/slab.h>
66 #include <linux/vmalloc.h>
67 #include <net/net_namespace.h>
68 #include <net/ip.h>
69 #include <net/protocol.h>
70 #include <linux/skbuff.h>
71 #include <net/sock.h>
72 #include <linux/errno.h>
73 #include <linux/timer.h>
74 #include <linux/uaccess.h>
75 #include <asm/ioctls.h>
76 #include <asm/page.h>
77 #include <asm/cacheflush.h>
78 #include <asm/io.h>
79 #include <linux/proc_fs.h>
80 #include <linux/seq_file.h>
81 #include <linux/poll.h>
82 #include <linux/module.h>
83 #include <linux/init.h>
84 #include <linux/mutex.h>
85 #include <linux/if_vlan.h>
86 #include <linux/virtio_net.h>
87 #include <linux/errqueue.h>
88 #include <linux/net_tstamp.h>
89 #include <linux/percpu.h>
90 #ifdef CONFIG_INET
91 #include <net/inet_common.h>
92 #endif
93 #include <linux/bpf.h>
94 #include <net/compat.h>
95 #include <linux/netfilter_netdev.h>
96
97 #include "internal.h"
98
99 /*
100 Assumptions:
101 - If the device has no dev->header_ops->create, there is no LL header
102 visible above the device. In this case, its hard_header_len should be 0.
103 The device may prepend its own header internally. In this case, its
104 needed_headroom should be set to the space needed for it to add its
105 internal header.
106 For example, a WiFi driver pretending to be an Ethernet driver should
107 set its hard_header_len to be the Ethernet header length, and set its
108 needed_headroom to be (the real WiFi header length - the fake Ethernet
109 header length).
110 - a packet socket receives packets with the ll header already pulled,
111 so SOCK_RAW has to push it back.
112
113 On receive:
114 -----------
115
116 Incoming, dev_has_header(dev) == true
117 mac_header -> ll header
118 data -> data
119
120 Outgoing, dev_has_header(dev) == true
121 mac_header -> ll header
122 data -> ll header
123
124 Incoming, dev_has_header(dev) == false
125 mac_header -> data
126 However drivers often make it point to the ll header.
127 This is incorrect because the ll header should be invisible to us.
128 data -> data
129
130 Outgoing, dev_has_header(dev) == false
131 mac_header -> data. ll header is invisible to us.
132 data -> data
133
134 Summary:
135 If dev_has_header(dev) == false we are unable to restore the ll header,
136 because it is invisible to us.
137
138
139 On transmit:
140 ------------
141
142 dev_has_header(dev) == true
143 mac_header -> ll header
144 data -> ll header
145
146 dev_has_header(dev) == false (ll header is invisible to us)
147 mac_header -> data
148 data -> data
149
150 On output we should set network_header to the correct position;
151 the packet classifier depends on it.
152 */
153
154 /* Private packet socket structures. */
155
156 /* identical to struct packet_mreq except it has
157 * a longer address field.
158 */
159 struct packet_mreq_max {
160 int mr_ifindex;
161 unsigned short mr_type;
162 unsigned short mr_alen;
163 unsigned char mr_address[MAX_ADDR_LEN];
164 };
165
166 union tpacket_uhdr {
167 struct tpacket_hdr *h1;
168 struct tpacket2_hdr *h2;
169 struct tpacket3_hdr *h3;
170 void *raw;
171 };
172
173 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
174 int closing, int tx_ring);
175
176 #define V3_ALIGNMENT (8)
177
178 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
179
180 #define BLK_PLUS_PRIV(sz_of_priv) \
181 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
182
183 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
184 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
185 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
186 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
187 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
188 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
189
190 struct packet_sock;
191 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
192 struct packet_type *pt, struct net_device *orig_dev);
193
194 static void *packet_previous_frame(struct packet_sock *po,
195 struct packet_ring_buffer *rb,
196 int status);
197 static void packet_increment_head(struct packet_ring_buffer *buff);
198 static int prb_curr_blk_in_use(struct tpacket_block_desc *);
199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
200 struct packet_sock *);
201 static void prb_retire_current_block(struct tpacket_kbdq_core *,
202 struct packet_sock *, unsigned int status);
203 static int prb_queue_frozen(struct tpacket_kbdq_core *);
204 static void prb_open_block(struct tpacket_kbdq_core *,
205 struct tpacket_block_desc *);
206 static void prb_retire_rx_blk_timer_expired(struct timer_list *);
207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
208 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
209 static void prb_clear_rxhash(struct tpacket_kbdq_core *,
210 struct tpacket3_hdr *);
211 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
212 struct tpacket3_hdr *);
213 static void packet_flush_mclist(struct sock *sk);
214 static u16 packet_pick_tx_queue(struct sk_buff *skb);
215
216 struct packet_skb_cb {
217 union {
218 struct sockaddr_pkt pkt;
219 union {
220 /* Trick: alias skb original length with
221 * ll.sll_family and ll.protocol in order
222 * to save room.
223 */
224 unsigned int origlen;
225 struct sockaddr_ll ll;
226 };
227 } sa;
228 };
229
230 #define vio_le() virtio_legacy_is_little_endian()
231
232 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
233
234 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
235 #define GET_PBLOCK_DESC(x, bid) \
236 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
238 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
239 #define GET_NEXT_PRB_BLK_NUM(x) \
240 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
241 ((x)->kactive_blk_num+1) : 0)
242
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
245
246 #ifdef CONFIG_NETFILTER_EGRESS
247 static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
248 {
249 struct sk_buff *next, *head = NULL, *tail;
250 int rc;
251
252 rcu_read_lock();
253 for (; skb != NULL; skb = next) {
254 next = skb->next;
255 skb_mark_not_on_list(skb);
256
257 if (!nf_hook_egress(skb, &rc, skb->dev))
258 continue;
259
260 if (!head)
261 head = skb;
262 else
263 tail->next = skb;
264
265 tail = skb;
266 }
267 rcu_read_unlock();
268
269 return head;
270 }
271 #endif
272
273 static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
274 {
275 if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
276 return dev_queue_xmit(skb);
277
278 #ifdef CONFIG_NETFILTER_EGRESS
279 if (nf_hook_egress_active()) {
280 skb = nf_hook_direct_egress(skb);
281 if (!skb)
282 return NET_XMIT_DROP;
283 }
284 #endif
285 return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
286 }
287
288 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
289 {
290 struct net_device *dev;
291
292 rcu_read_lock();
293 dev = rcu_dereference(po->cached_dev);
294 dev_hold(dev);
295 rcu_read_unlock();
296
297 return dev;
298 }
299
300 static void packet_cached_dev_assign(struct packet_sock *po,
301 struct net_device *dev)
302 {
303 rcu_assign_pointer(po->cached_dev, dev);
304 }
305
306 static void packet_cached_dev_reset(struct packet_sock *po)
307 {
308 RCU_INIT_POINTER(po->cached_dev, NULL);
309 }
310
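/* Select the TX queue for a PACKET_QDISC_BYPASS transmit: prefer the
 * driver's ndo_select_queue() (capped to a valid queue index), otherwise
 * fall back to the generic netdev_pick_tx() selection.
 */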
311 static u16 packet_pick_tx_queue(struct sk_buff *skb)
312 {
313 struct net_device *dev = skb->dev;
314 const struct net_device_ops *ops = dev->netdev_ops;
315 int cpu = raw_smp_processor_id();
316 u16 queue_index;
317
318 #ifdef CONFIG_XPS
319 skb->sender_cpu = cpu + 1;
320 #endif
321 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
322 if (ops->ndo_select_queue) {
323 queue_index = ops->ndo_select_queue(dev, skb, NULL);
324 queue_index = netdev_cap_txqueue(dev, queue_index);
325 } else {
326 queue_index = netdev_pick_tx(dev, skb, NULL);
327 }
328
329 return queue_index;
330 }
331
332 /* __register_prot_hook must be invoked through register_prot_hook
333 * or from a context in which asynchronous accesses to the packet
334 * socket are not possible (packet_create()).
335 */
336 static void __register_prot_hook(struct sock *sk)
337 {
338 struct packet_sock *po = pkt_sk(sk);
339
340 if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
341 if (po->fanout)
342 __fanout_link(sk, po);
343 else
344 dev_add_pack(&po->prot_hook);
345
346 sock_hold(sk);
347 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
348 }
349 }
350
351 static void register_prot_hook(struct sock *sk)
352 {
353 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
354 __register_prot_hook(sk);
355 }
356
357 /* If the sync parameter is true, we will temporarily drop
358 * the po->bind_lock and do a synchronize_net to make sure no
359 * asynchronous packet processing paths still refer to the elements
360 * of po->prot_hook. If the sync parameter is false, it is the
361 * caller's responsibility to take care of this.
362 */
363 static void __unregister_prot_hook(struct sock *sk, bool sync)
364 {
365 struct packet_sock *po = pkt_sk(sk);
366
367 lockdep_assert_held_once(&po->bind_lock);
368
369 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
370
371 if (po->fanout)
372 __fanout_unlink(sk, po);
373 else
374 __dev_remove_pack(&po->prot_hook);
375
376 __sock_put(sk);
377
378 if (sync) {
379 spin_unlock(&po->bind_lock);
380 synchronize_net();
381 spin_lock(&po->bind_lock);
382 }
383 }
384
385 static void unregister_prot_hook(struct sock *sk, bool sync)
386 {
387 struct packet_sock *po = pkt_sk(sk);
388
389 if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
390 __unregister_prot_hook(sk, sync);
391 }
392
393 static inline struct page * __pure pgv_to_page(void *addr)
394 {
395 if (is_vmalloc_addr(addr))
396 return vmalloc_to_page(addr);
397 return virt_to_page(addr);
398 }
399
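/* Hand a ring frame between kernel and user space by updating its
 * tp_status word. Paired with __packet_get_status(); the barriers and
 * cache flushes order the status change against the frame payload.
 */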
400 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
401 {
402 union tpacket_uhdr h;
403
404 /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
405
406 h.raw = frame;
407 switch (po->tp_version) {
408 case TPACKET_V1:
409 WRITE_ONCE(h.h1->tp_status, status);
410 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
411 break;
412 case TPACKET_V2:
413 WRITE_ONCE(h.h2->tp_status, status);
414 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
415 break;
416 case TPACKET_V3:
417 WRITE_ONCE(h.h3->tp_status, status);
418 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
419 break;
420 default:
421 WARN(1, "TPACKET version not supported.\n");
422 BUG();
423 }
424
425 smp_wmb();
426 }
427
428 static int __packet_get_status(const struct packet_sock *po, void *frame)
429 {
430 union tpacket_uhdr h;
431
432 smp_rmb();
433
434 /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
435
436 h.raw = frame;
437 switch (po->tp_version) {
438 case TPACKET_V1:
439 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
440 return READ_ONCE(h.h1->tp_status);
441 case TPACKET_V2:
442 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
443 return READ_ONCE(h.h2->tp_status);
444 case TPACKET_V3:
445 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
446 return READ_ONCE(h.h3->tp_status);
447 default:
448 WARN(1, "TPACKET version not supported.\n");
449 BUG();
450 return 0;
451 }
452 }
453
454 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
455 unsigned int flags)
456 {
457 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
458
459 if (shhwtstamps &&
460 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
461 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
462 return TP_STATUS_TS_RAW_HARDWARE;
463
464 if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
465 ktime_to_timespec64_cond(skb_tstamp(skb), ts))
466 return TP_STATUS_TS_SOFTWARE;
467
468 return 0;
469 }
470
471 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
472 struct sk_buff *skb)
473 {
474 union tpacket_uhdr h;
475 struct timespec64 ts;
476 __u32 ts_status;
477
478 if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
479 return 0;
480
481 h.raw = frame;
482 /*
483 * versions 1 through 3 overflow the timestamps in y2106, since they
484 * all store the seconds in a 32-bit unsigned integer.
485 * If we create a version 4, that should have a 64-bit timestamp,
486 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
487 * nanoseconds.
488 */
489 switch (po->tp_version) {
490 case TPACKET_V1:
491 h.h1->tp_sec = ts.tv_sec;
492 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
493 break;
494 case TPACKET_V2:
495 h.h2->tp_sec = ts.tv_sec;
496 h.h2->tp_nsec = ts.tv_nsec;
497 break;
498 case TPACKET_V3:
499 h.h3->tp_sec = ts.tv_sec;
500 h.h3->tp_nsec = ts.tv_nsec;
501 break;
502 default:
503 WARN(1, "TPACKET version not supported.\n");
504 BUG();
505 }
506
507 /* one flush is safe, as both fields always lie on the same cacheline */
508 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
509 smp_wmb();
510
511 return ts_status;
512 }
513
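/* Map a frame index in the ring to its buffer address, returning it only
 * if the frame's current status matches the one the caller asked for
 * (e.g. TP_STATUS_KERNEL for a free frame).
 */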
514 static void *packet_lookup_frame(const struct packet_sock *po,
515 const struct packet_ring_buffer *rb,
516 unsigned int position,
517 int status)
518 {
519 unsigned int pg_vec_pos, frame_offset;
520 union tpacket_uhdr h;
521
522 pg_vec_pos = position / rb->frames_per_block;
523 frame_offset = position % rb->frames_per_block;
524
525 h.raw = rb->pg_vec[pg_vec_pos].buffer +
526 (frame_offset * rb->frame_size);
527
528 if (status != __packet_get_status(po, h.raw))
529 return NULL;
530
531 return h.raw;
532 }
533
534 static void *packet_current_frame(struct packet_sock *po,
535 struct packet_ring_buffer *rb,
536 int status)
537 {
538 return packet_lookup_frame(po, rb, rb->head, status);
539 }
540
541 static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
542 {
543 struct vlan_hdr vhdr, *vh;
544 unsigned int header_len;
545
546 if (!dev)
547 return 0;
548
549 /* In the SOCK_DGRAM scenario, skb data starts at the network
550 * protocol, which is after the VLAN headers. The outer VLAN
551 * header is at the hard_header_len offset in non-variable
552 * length link layer headers. If it's a VLAN device, the
553 * min_header_len should be used to exclude the VLAN header
554 * size.
555 */
556 if (dev->min_header_len == dev->hard_header_len)
557 header_len = dev->hard_header_len;
558 else if (is_vlan_dev(dev))
559 header_len = dev->min_header_len;
560 else
561 return 0;
562
563 vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
564 sizeof(vhdr), &vhdr);
565 if (unlikely(!vh))
566 return 0;
567
568 return ntohs(vh->h_vlan_TCI);
569 }
570
571 static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
572 {
573 __be16 proto = skb->protocol;
574
575 if (unlikely(eth_type_vlan(proto)))
576 proto = __vlan_get_protocol_offset(skb, proto,
577 skb_mac_offset(skb), NULL);
578
579 return proto;
580 }
581
582 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
583 {
584 del_timer_sync(&pkc->retire_blk_timer);
585 }
586
587 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
588 struct sk_buff_head *rb_queue)
589 {
590 struct tpacket_kbdq_core *pkc;
591
592 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
593
594 spin_lock_bh(&rb_queue->lock);
595 pkc->delete_blk_timer = 1;
596 spin_unlock_bh(&rb_queue->lock);
597
598 prb_del_retire_blk_timer(pkc);
599 }
600
601 static void prb_setup_retire_blk_timer(struct packet_sock *po)
602 {
603 struct tpacket_kbdq_core *pkc;
604
605 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
606 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
607 0);
608 pkc->retire_blk_timer.expires = jiffies;
609 }
610
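/* Derive a default block-retire timeout (in msecs) for TPACKET_V3 from
 * the device link speed: roughly the time needed to fill one block at
 * line rate, falling back to DEFAULT_PRB_RETIRE_TOV for slow or unknown
 * links.
 */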
611 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
612 int blk_size_in_bytes)
613 {
614 struct net_device *dev;
615 unsigned int mbits, div;
616 struct ethtool_link_ksettings ecmd;
617 int err;
618
619 rtnl_lock();
620 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
621 if (unlikely(!dev)) {
622 rtnl_unlock();
623 return DEFAULT_PRB_RETIRE_TOV;
624 }
625 err = __ethtool_get_link_ksettings(dev, &ecmd);
626 rtnl_unlock();
627 if (err)
628 return DEFAULT_PRB_RETIRE_TOV;
629
630 /* If the link speed is so slow, you don't really
631 * need to worry about perf anyway.
632 */
633 if (ecmd.base.speed < SPEED_1000 ||
634 ecmd.base.speed == SPEED_UNKNOWN)
635 return DEFAULT_PRB_RETIRE_TOV;
636
637 div = ecmd.base.speed / 1000;
638 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
639
640 if (div)
641 mbits /= div;
642
643 if (div)
644 return mbits + 1;
645 return mbits;
646 }
647
648 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
649 union tpacket_req_u *req_u)
650 {
651 p1->feature_req_word = req_u->req3.tp_feature_req_word;
652 }
653
654 static void init_prb_bdqc(struct packet_sock *po,
655 struct packet_ring_buffer *rb,
656 struct pgv *pg_vec,
657 union tpacket_req_u *req_u)
658 {
659 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
660 struct tpacket_block_desc *pbd;
661
662 memset(p1, 0x0, sizeof(*p1));
663
664 p1->knxt_seq_num = 1;
665 p1->pkbdq = pg_vec;
666 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
667 p1->pkblk_start = pg_vec[0].buffer;
668 p1->kblk_size = req_u->req3.tp_block_size;
669 p1->knum_blocks = req_u->req3.tp_block_nr;
670 p1->hdrlen = po->tp_hdrlen;
671 p1->version = po->tp_version;
672 p1->last_kactive_blk_num = 0;
673 po->stats.stats3.tp_freeze_q_cnt = 0;
674 if (req_u->req3.tp_retire_blk_tov)
675 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
676 else
677 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
678 req_u->req3.tp_block_size);
679 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
680 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
681 rwlock_init(&p1->blk_fill_in_prog_lock);
682
683 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
684 prb_init_ft_ops(p1, req_u);
685 prb_setup_retire_blk_timer(po);
686 prb_open_block(p1, pbd);
687 }
688
689 /* Do NOT update the last_blk_num first.
690 * Assumes sk_buff_head lock is held.
691 */
692 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
693 {
694 mod_timer(&pkc->retire_blk_timer,
695 jiffies + pkc->tov_in_jiffies);
696 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
697 }
698
699 /*
700 * Timer logic:
701 * 1) We refresh the timer only when we open a block.
702 * By doing this we don't waste cycles refreshing the timer
703 * on a packet-by-packet basis.
704 *
705 * With a 1MB block-size, on a 1Gbps line, it will take
706 * i) ~8 ms to fill a block + ii) memcpy etc.
707 * In this cut we are not accounting for the memcpy time.
708 *
709 * So, if the user sets the 'tmo' to 10ms then the timer
710 * will never fire while the block is still getting filled
711 * (which is what we want). However, the user could choose
712 * to close a block early and that's fine.
713 *
714 * But when the timer does fire, we check whether or not to refresh it.
715 * Since the tmo granularity is in msecs, it is not too expensive
716 * to refresh the timer, let's say every '8' msecs.
717 * Either the user can set the 'tmo' or we can derive it based on
718 * a) line-speed and b) block-size.
719 * prb_calc_retire_blk_tmo() calculates the tmo.
720 *
721 */
722 static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
723 {
724 struct packet_sock *po =
725 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
726 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
727 unsigned int frozen;
728 struct tpacket_block_desc *pbd;
729
730 spin_lock(&po->sk.sk_receive_queue.lock);
731
732 frozen = prb_queue_frozen(pkc);
733 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
734
735 if (unlikely(pkc->delete_blk_timer))
736 goto out;
737
738 /* We only need to plug the race when the block is partially filled.
739 * tpacket_rcv:
740 * lock(); increment BLOCK_NUM_PKTS; unlock()
741 * copy_bits() is in progress ...
742 * timer fires on other cpu:
743 * we can't retire the current block because copy_bits
744 * is in progress.
745 *
746 */
747 if (BLOCK_NUM_PKTS(pbd)) {
748 /* Waiting for skb_copy_bits to finish... */
749 write_lock(&pkc->blk_fill_in_prog_lock);
750 write_unlock(&pkc->blk_fill_in_prog_lock);
751 }
752
753 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
754 if (!frozen) {
755 if (!BLOCK_NUM_PKTS(pbd)) {
756 /* An empty block. Just refresh the timer. */
757 goto refresh_timer;
758 }
759 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
760 if (!prb_dispatch_next_block(pkc, po))
761 goto refresh_timer;
762 else
763 goto out;
764 } else {
765 /* Case 1. Queue was frozen because user-space was
766 * lagging behind.
767 */
768 if (prb_curr_blk_in_use(pbd)) {
769 /*
770 * Ok, user-space is still behind.
771 * So just refresh the timer.
772 */
773 goto refresh_timer;
774 } else {
775 /* Case 2. The queue was frozen, user-space caught up,
776 * now the link went idle && the timer fired.
777 * We don't have a block to close. So we open this
778 * block and restart the timer.
779 * Opening a block thaws the queue and restarts the timer.
780 * Thawing/timer-refresh is a side effect.
781 */
782 prb_open_block(pkc, pbd);
783 goto out;
784 }
785 }
786 }
787
788 refresh_timer:
789 _prb_refresh_rx_retire_blk_timer(pkc);
790
791 out:
792 spin_unlock(&po->sk.sk_receive_queue.lock);
793 }
794
795 static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
796 struct tpacket_block_desc *pbd1, __u32 status)
797 {
798 /* Flush everything minus the block header */
799
800 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
801 u8 *start, *end;
802
803 start = (u8 *)pbd1;
804
805 /* Skip the block header (we know the header WILL fit in 4K) */
806 start += PAGE_SIZE;
807
808 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
809 for (; start < end; start += PAGE_SIZE)
810 flush_dcache_page(pgv_to_page(start));
811
812 smp_wmb();
813 #endif
814
815 /* Now update the block status. */
816
817 BLOCK_STATUS(pbd1) = status;
818
819 /* Flush the block header */
820
821 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
822 start = (u8 *)pbd1;
823 flush_dcache_page(pgv_to_page(start));
824
825 smp_wmb();
826 #endif
827 }
828
829 /*
830 * Side effect:
831 *
832 * 1) flush the block
833 * 2) Increment active_blk_num
834 *
835 * Note: we deliberately do NOT refresh the timer here,
836 * because almost always the next block will be opened.
837 */
838 static void prb_close_block(struct tpacket_kbdq_core *pkc1,
839 struct tpacket_block_desc *pbd1,
840 struct packet_sock *po, unsigned int stat)
841 {
842 __u32 status = TP_STATUS_USER | stat;
843
844 struct tpacket3_hdr *last_pkt;
845 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
846 struct sock *sk = &po->sk;
847
848 if (atomic_read(&po->tp_drops))
849 status |= TP_STATUS_LOSING;
850
851 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
852 last_pkt->tp_next_offset = 0;
853
854 /* Get the ts of the last pkt */
855 if (BLOCK_NUM_PKTS(pbd1)) {
856 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
857 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
858 } else {
859 /* Ok, we tmo'd - so get the current time.
860 *
861 * It shouldn't really happen as we don't close empty
862 * blocks. See prb_retire_rx_blk_timer_expired().
863 */
864 struct timespec64 ts;
865 ktime_get_real_ts64(&ts);
866 h1->ts_last_pkt.ts_sec = ts.tv_sec;
867 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
868 }
869
870 smp_wmb();
871
872 /* Flush the block */
873 prb_flush_block(pkc1, pbd1, status);
874
875 sk->sk_data_ready(sk);
876
877 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
878 }
879
880 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
881 {
882 pkc->reset_pending_on_curr_blk = 0;
883 }
884
885 /*
886 * Side effect of opening a block:
887 *
888 * 1) prb_queue is thawed.
889 * 2) retire_blk_timer is refreshed.
890 *
891 */
892 static void prb_open_block(struct tpacket_kbdq_core *pkc1,
893 struct tpacket_block_desc *pbd1)
894 {
895 struct timespec64 ts;
896 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
897
898 smp_rmb();
899
900 /* We could have just memset this but we will lose the
901 * flexibility of making the priv area sticky
902 */
903
904 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
905 BLOCK_NUM_PKTS(pbd1) = 0;
906 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
907
908 ktime_get_real_ts64(&ts);
909
910 h1->ts_first_pkt.ts_sec = ts.tv_sec;
911 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
912
913 pkc1->pkblk_start = (char *)pbd1;
914 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
915
916 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
917 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
918
919 pbd1->version = pkc1->version;
920 pkc1->prev = pkc1->nxt_offset;
921 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
922
923 prb_thaw_queue(pkc1);
924 _prb_refresh_rx_retire_blk_timer(pkc1);
925
926 smp_wmb();
927 }
928
929 /*
930 * Queue freeze logic:
931 * 1) Assume tp_block_nr = 8 blocks.
932 * 2) At time 't0', user opens Rx ring.
933 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
934 * 4) user-space is either sleeping or processing block '0'.
935 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
936 * it will close block-7, loop around and try to fill block '0'.
937 * call-flow:
938 * __packet_lookup_frame_in_block
939 * prb_retire_current_block()
940 * prb_dispatch_next_block()
941 * |->(BLOCK_STATUS == USER) evaluates to true
942 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
943 * 6) Now there are two cases:
944 * 6.1) Link goes idle right after the queue is frozen.
945 * But remember, the last open_block() refreshed the timer.
946 * When this timer expires, it will refresh itself so that we can
947 * re-open block-0 in near future.
948 * 6.2) Link is busy and keeps on receiving packets. This is a simple
949 * case and __packet_lookup_frame_in_block will check if block-0
950 * is free and can now be re-used.
951 */
952 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
953 struct packet_sock *po)
954 {
955 pkc->reset_pending_on_curr_blk = 1;
956 po->stats.stats3.tp_freeze_q_cnt++;
957 }
958
959 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
960
961 /*
962 * If the next block is free then we will dispatch it
963 * and return a good offset.
964 * Else, we will freeze the queue.
965 * So, caller must check the return value.
966 */
967 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
968 struct packet_sock *po)
969 {
970 struct tpacket_block_desc *pbd;
971
972 smp_rmb();
973
974 /* 1. Get current block num */
975 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
976
977 /* 2. If this block is currently in_use then freeze the queue */
978 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
979 prb_freeze_queue(pkc, po);
980 return NULL;
981 }
982
983 /*
984 * 3.
985 * open this block and return the offset where the first packet
986 * needs to get stored.
987 */
988 prb_open_block(pkc, pbd);
989 return (void *)pkc->nxt_offset;
990 }
991
992 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
993 struct packet_sock *po, unsigned int status)
994 {
995 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
996
997 /* retire/close the current block */
998 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
999 /*
1000 * Plug the case where copy_bits() is in progress on
1001 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
1002 * have space to copy the pkt in the current block and
1003 * called prb_retire_current_block()
1004 *
1005 * We don't need to worry about the TMO case because
1006 * the timer-handler already handled this case.
1007 */
1008 if (!(status & TP_STATUS_BLK_TMO)) {
1009 /* Waiting for skb_copy_bits to finish... */
1010 write_lock(&pkc->blk_fill_in_prog_lock);
1011 write_unlock(&pkc->blk_fill_in_prog_lock);
1012 }
1013 prb_close_block(pkc, pbd, po, status);
1014 return;
1015 }
1016 }
1017
1018 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
1019 {
1020 return TP_STATUS_USER & BLOCK_STATUS(pbd);
1021 }
1022
1023 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
1024 {
1025 return pkc->reset_pending_on_curr_blk;
1026 }
1027
1028 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
1029 __releases(&pkc->blk_fill_in_prog_lock)
1030 {
1031 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1032
1033 read_unlock(&pkc->blk_fill_in_prog_lock);
1034 }
1035
1036 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
1037 struct tpacket3_hdr *ppd)
1038 {
1039 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
1040 }
1041
1042 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
1043 struct tpacket3_hdr *ppd)
1044 {
1045 ppd->hv1.tp_rxhash = 0;
1046 }
1047
1048 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1049 struct tpacket3_hdr *ppd)
1050 {
1051 struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
1052
1053 if (skb_vlan_tag_present(pkc->skb)) {
1054 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1055 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1056 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1057 } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
1058 ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
1059 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
1060 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1061 } else {
1062 ppd->hv1.tp_vlan_tci = 0;
1063 ppd->hv1.tp_vlan_tpid = 0;
1064 ppd->tp_status = TP_STATUS_AVAILABLE;
1065 }
1066 }
1067
1068 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1069 struct tpacket3_hdr *ppd)
1070 {
1071 ppd->hv1.tp_padding = 0;
1072 prb_fill_vlan_info(pkc, ppd);
1073
1074 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1075 prb_fill_rxhash(pkc, ppd);
1076 else
1077 prb_clear_rxhash(pkc, ppd);
1078 }
1079
1080 static void prb_fill_curr_block(char *curr,
1081 struct tpacket_kbdq_core *pkc,
1082 struct tpacket_block_desc *pbd,
1083 unsigned int len)
1084 __acquires(&pkc->blk_fill_in_prog_lock)
1085 {
1086 struct tpacket3_hdr *ppd;
1087
1088 ppd = (struct tpacket3_hdr *)curr;
1089 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1090 pkc->prev = curr;
1091 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1092 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1093 BLOCK_NUM_PKTS(pbd) += 1;
1094 read_lock(&pkc->blk_fill_in_prog_lock);
1095 prb_run_all_ft_ops(pkc, ppd);
1096 }
1097
1098 /* Assumes caller has the sk->rx_queue.lock */
1099 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1100 struct sk_buff *skb,
1101 unsigned int len
1102 )
1103 {
1104 struct tpacket_kbdq_core *pkc;
1105 struct tpacket_block_desc *pbd;
1106 char *curr, *end;
1107
1108 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1109 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1110
1111 /* Queue is frozen when user space is lagging behind */
1112 if (prb_queue_frozen(pkc)) {
1113 /*
1114 * Check if the last block, which caused the queue to freeze,
1115 * is still in_use by user-space.
1116 */
1117 if (prb_curr_blk_in_use(pbd)) {
1118 /* Can't record this packet */
1119 return NULL;
1120 } else {
1121 /*
1122 * Ok, the block was released by user-space.
1123 * Now let's open that block.
1124 * opening a block also thaws the queue.
1125 * Thawing is a side effect.
1126 */
1127 prb_open_block(pkc, pbd);
1128 }
1129 }
1130
1131 smp_mb();
1132 curr = pkc->nxt_offset;
1133 pkc->skb = skb;
1134 end = (char *)pbd + pkc->kblk_size;
1135
1136 /* first try the current block */
1137 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1138 prb_fill_curr_block(curr, pkc, pbd, len);
1139 return (void *)curr;
1140 }
1141
1142 /* Ok, close the current block */
1143 prb_retire_current_block(pkc, po, 0);
1144
1145 /* Now, try to dispatch the next block */
1146 curr = (char *)prb_dispatch_next_block(pkc, po);
1147 if (curr) {
1148 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1149 prb_fill_curr_block(curr, pkc, pbd, len);
1150 return (void *)curr;
1151 }
1152
1153 /*
1154 * No free blocks are available. User-space hasn't caught up yet.
1155 * Queue was just frozen and now this packet will get dropped.
1156 */
1157 return NULL;
1158 }
1159
1160 static void *packet_current_rx_frame(struct packet_sock *po,
1161 struct sk_buff *skb,
1162 int status, unsigned int len)
1163 {
1164 char *curr = NULL;
1165 switch (po->tp_version) {
1166 case TPACKET_V1:
1167 case TPACKET_V2:
1168 curr = packet_lookup_frame(po, &po->rx_ring,
1169 po->rx_ring.head, status);
1170 return curr;
1171 case TPACKET_V3:
1172 return __packet_lookup_frame_in_block(po, skb, len);
1173 default:
1174 WARN(1, "TPACKET version not supported\n");
1175 BUG();
1176 return NULL;
1177 }
1178 }
1179
1180 static void *prb_lookup_block(const struct packet_sock *po,
1181 const struct packet_ring_buffer *rb,
1182 unsigned int idx,
1183 int status)
1184 {
1185 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1186 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1187
1188 if (status != BLOCK_STATUS(pbd))
1189 return NULL;
1190 return pbd;
1191 }
1192
1193 static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1194 {
1195 unsigned int prev;
1196 if (rb->prb_bdqc.kactive_blk_num)
1197 prev = rb->prb_bdqc.kactive_blk_num-1;
1198 else
1199 prev = rb->prb_bdqc.knum_blocks-1;
1200 return prev;
1201 }
1202
1203 /* Assumes caller has held the rx_queue.lock */
1204 static void *__prb_previous_block(struct packet_sock *po,
1205 struct packet_ring_buffer *rb,
1206 int status)
1207 {
1208 unsigned int previous = prb_previous_blk_num(rb);
1209 return prb_lookup_block(po, rb, previous, status);
1210 }
1211
1212 static void *packet_previous_rx_frame(struct packet_sock *po,
1213 struct packet_ring_buffer *rb,
1214 int status)
1215 {
1216 if (po->tp_version <= TPACKET_V2)
1217 return packet_previous_frame(po, rb, status);
1218
1219 return __prb_previous_block(po, rb, status);
1220 }
1221
1222 static void packet_increment_rx_head(struct packet_sock *po,
1223 struct packet_ring_buffer *rb)
1224 {
1225 switch (po->tp_version) {
1226 case TPACKET_V1:
1227 case TPACKET_V2:
1228 return packet_increment_head(rb);
1229 case TPACKET_V3:
1230 default:
1231 WARN(1, "TPACKET version not supported.\n");
1232 BUG();
1233 return;
1234 }
1235 }
1236
1237 static void *packet_previous_frame(struct packet_sock *po,
1238 struct packet_ring_buffer *rb,
1239 int status)
1240 {
1241 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1242 return packet_lookup_frame(po, rb, previous, status);
1243 }
1244
1245 static void packet_increment_head(struct packet_ring_buffer *buff)
1246 {
1247 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1248 }
1249
1250 static void packet_inc_pending(struct packet_ring_buffer *rb)
1251 {
1252 this_cpu_inc(*rb->pending_refcnt);
1253 }
1254
1255 static void packet_dec_pending(struct packet_ring_buffer *rb)
1256 {
1257 this_cpu_dec(*rb->pending_refcnt);
1258 }
1259
1260 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1261 {
1262 unsigned int refcnt = 0;
1263 int cpu;
1264
1265 /* We don't use pending refcount in rx_ring. */
1266 if (rb->pending_refcnt == NULL)
1267 return 0;
1268
1269 for_each_possible_cpu(cpu)
1270 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1271
1272 return refcnt;
1273 }
1274
1275 static int packet_alloc_pending(struct packet_sock *po)
1276 {
1277 po->rx_ring.pending_refcnt = NULL;
1278
1279 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1280 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1281 return -ENOBUFS;
1282
1283 return 0;
1284 }
1285
1286 static void packet_free_pending(struct packet_sock *po)
1287 {
1288 free_percpu(po->tx_ring.pending_refcnt);
1289 }
1290
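/* Receive-space levels reported by __packet_rcv_has_room(): ROOM_NORMAL
 * means roughly a quarter of the ring/receive buffer is still free
 * (ROOM_POW_OFF = 2), ROOM_LOW means some space remains, ROOM_NONE means
 * the next packet cannot be stored.
 */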
1291 #define ROOM_POW_OFF 2
1292 #define ROOM_NONE 0x0
1293 #define ROOM_LOW 0x1
1294 #define ROOM_NORMAL 0x2
1295
1296 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1297 {
1298 int idx, len;
1299
1300 len = READ_ONCE(po->rx_ring.frame_max) + 1;
1301 idx = READ_ONCE(po->rx_ring.head);
1302 if (pow_off)
1303 idx += len >> pow_off;
1304 if (idx >= len)
1305 idx -= len;
1306 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1307 }
1308
1309 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1310 {
1311 int idx, len;
1312
1313 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1314 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1315 if (pow_off)
1316 idx += len >> pow_off;
1317 if (idx >= len)
1318 idx -= len;
1319 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1320 }
1321
1322 static int __packet_rcv_has_room(const struct packet_sock *po,
1323 const struct sk_buff *skb)
1324 {
1325 const struct sock *sk = &po->sk;
1326 int ret = ROOM_NONE;
1327
1328 if (po->prot_hook.func != tpacket_rcv) {
1329 int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1330 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1331 - (skb ? skb->truesize : 0);
1332
1333 if (avail > (rcvbuf >> ROOM_POW_OFF))
1334 return ROOM_NORMAL;
1335 else if (avail > 0)
1336 return ROOM_LOW;
1337 else
1338 return ROOM_NONE;
1339 }
1340
1341 if (po->tp_version == TPACKET_V3) {
1342 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1343 ret = ROOM_NORMAL;
1344 else if (__tpacket_v3_has_room(po, 0))
1345 ret = ROOM_LOW;
1346 } else {
1347 if (__tpacket_has_room(po, ROOM_POW_OFF))
1348 ret = ROOM_NORMAL;
1349 else if (__tpacket_has_room(po, 0))
1350 ret = ROOM_LOW;
1351 }
1352
1353 return ret;
1354 }
1355
1356 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1357 {
1358 bool pressure;
1359 int ret;
1360
1361 ret = __packet_rcv_has_room(po, skb);
1362 pressure = ret != ROOM_NORMAL;
1363
1364 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
1365 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
1366
1367 return ret;
1368 }
1369
1370 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1371 {
1372 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
1373 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1374 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
1375 }
1376
1377 static void packet_sock_destruct(struct sock *sk)
1378 {
1379 skb_queue_purge(&sk->sk_error_queue);
1380
1381 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1382 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1383
1384 if (!sock_flag(sk, SOCK_DEAD)) {
1385 pr_err("Attempt to release alive packet socket: %p\n", sk);
1386 return;
1387 }
1388 }
1389
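/* Rollover heuristic: returns true when the skb's flow hash dominates
 * the recent history (more than half of the sampled slots), i.e. one
 * heavy flow is responsible for filling the target socket.
 */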
1390 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1391 {
1392 u32 *history = po->rollover->history;
1393 u32 victim, rxhash;
1394 int i, count = 0;
1395
1396 rxhash = skb_get_hash(skb);
1397 for (i = 0; i < ROLLOVER_HLEN; i++)
1398 if (READ_ONCE(history[i]) == rxhash)
1399 count++;
1400
1401 victim = get_random_u32_below(ROLLOVER_HLEN);
1402
1403 /* Avoid dirtying the cache line if possible */
1404 if (READ_ONCE(history[victim]) != rxhash)
1405 WRITE_ONCE(history[victim], rxhash);
1406
1407 return count > (ROLLOVER_HLEN >> 1);
1408 }
1409
1410 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1411 struct sk_buff *skb,
1412 unsigned int num)
1413 {
1414 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1415 }
1416
1417 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1418 struct sk_buff *skb,
1419 unsigned int num)
1420 {
1421 unsigned int val = atomic_inc_return(&f->rr_cur);
1422
1423 return val % num;
1424 }
1425
1426 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1427 struct sk_buff *skb,
1428 unsigned int num)
1429 {
1430 return smp_processor_id() % num;
1431 }
1432
1433 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1434 struct sk_buff *skb,
1435 unsigned int num)
1436 {
1437 return get_random_u32_below(num);
1438 }
1439
1440 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1441 struct sk_buff *skb,
1442 unsigned int idx, bool try_self,
1443 unsigned int num)
1444 {
1445 struct packet_sock *po, *po_next, *po_skip = NULL;
1446 unsigned int i, j, room = ROOM_NONE;
1447
1448 po = pkt_sk(rcu_dereference(f->arr[idx]));
1449
1450 if (try_self) {
1451 room = packet_rcv_has_room(po, skb);
1452 if (room == ROOM_NORMAL ||
1453 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1454 return idx;
1455 po_skip = po;
1456 }
1457
1458 i = j = min_t(int, po->rollover->sock, num - 1);
1459 do {
1460 po_next = pkt_sk(rcu_dereference(f->arr[i]));
1461 if (po_next != po_skip &&
1462 !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
1463 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1464 if (i != j)
1465 po->rollover->sock = i;
1466 atomic_long_inc(&po->rollover->num);
1467 if (room == ROOM_LOW)
1468 atomic_long_inc(&po->rollover->num_huge);
1469 return i;
1470 }
1471
1472 if (++i == num)
1473 i = 0;
1474 } while (i != j);
1475
1476 atomic_long_inc(&po->rollover->num_failed);
1477 return idx;
1478 }
1479
1480 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1481 struct sk_buff *skb,
1482 unsigned int num)
1483 {
1484 return skb_get_queue_mapping(skb) % num;
1485 }
1486
1487 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1488 struct sk_buff *skb,
1489 unsigned int num)
1490 {
1491 struct bpf_prog *prog;
1492 unsigned int ret = 0;
1493
1494 rcu_read_lock();
1495 prog = rcu_dereference(f->bpf_prog);
1496 if (prog)
1497 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1498 rcu_read_unlock();
1499
1500 return ret;
1501 }
1502
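/* The PACKET_FANOUT_FLAG_* values live in the high byte of the fanout
 * type/flags word passed by user space; f->flags stores them shifted
 * down, hence the ">> 8" here.
 */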
1503 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1504 {
1505 return f->flags & (flag >> 8);
1506 }
1507
1508 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1509 struct packet_type *pt, struct net_device *orig_dev)
1510 {
1511 struct packet_fanout *f = pt->af_packet_priv;
1512 unsigned int num = READ_ONCE(f->num_members);
1513 struct net *net = read_pnet(&f->net);
1514 struct packet_sock *po;
1515 unsigned int idx;
1516
1517 if (!net_eq(dev_net(dev), net) || !num) {
1518 kfree_skb(skb);
1519 return 0;
1520 }
1521
1522 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1523 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1524 if (!skb)
1525 return 0;
1526 }
1527 switch (f->type) {
1528 case PACKET_FANOUT_HASH:
1529 default:
1530 idx = fanout_demux_hash(f, skb, num);
1531 break;
1532 case PACKET_FANOUT_LB:
1533 idx = fanout_demux_lb(f, skb, num);
1534 break;
1535 case PACKET_FANOUT_CPU:
1536 idx = fanout_demux_cpu(f, skb, num);
1537 break;
1538 case PACKET_FANOUT_RND:
1539 idx = fanout_demux_rnd(f, skb, num);
1540 break;
1541 case PACKET_FANOUT_QM:
1542 idx = fanout_demux_qm(f, skb, num);
1543 break;
1544 case PACKET_FANOUT_ROLLOVER:
1545 idx = fanout_demux_rollover(f, skb, 0, false, num);
1546 break;
1547 case PACKET_FANOUT_CBPF:
1548 case PACKET_FANOUT_EBPF:
1549 idx = fanout_demux_bpf(f, skb, num);
1550 break;
1551 }
1552
1553 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1554 idx = fanout_demux_rollover(f, skb, idx, true, num);
1555
1556 po = pkt_sk(rcu_dereference(f->arr[idx]));
1557 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1558 }
1559
1560 DEFINE_MUTEX(fanout_mutex);
1561 EXPORT_SYMBOL_GPL(fanout_mutex);
1562 static LIST_HEAD(fanout_list);
1563 static u16 fanout_next_id;
1564
1565 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1566 {
1567 struct packet_fanout *f = po->fanout;
1568
1569 spin_lock(&f->lock);
1570 rcu_assign_pointer(f->arr[f->num_members], sk);
1571 smp_wmb();
1572 f->num_members++;
1573 if (f->num_members == 1)
1574 dev_add_pack(&f->prot_hook);
1575 spin_unlock(&f->lock);
1576 }
1577
1578 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1579 {
1580 struct packet_fanout *f = po->fanout;
1581 int i;
1582
1583 spin_lock(&f->lock);
1584 for (i = 0; i < f->num_members; i++) {
1585 if (rcu_dereference_protected(f->arr[i],
1586 lockdep_is_held(&f->lock)) == sk)
1587 break;
1588 }
1589 BUG_ON(i >= f->num_members);
1590 rcu_assign_pointer(f->arr[i],
1591 rcu_dereference_protected(f->arr[f->num_members - 1],
1592 lockdep_is_held(&f->lock)));
1593 f->num_members--;
1594 if (f->num_members == 0)
1595 __dev_remove_pack(&f->prot_hook);
1596 spin_unlock(&f->lock);
1597 }
1598
1599 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1600 {
1601 if (sk->sk_family != PF_PACKET)
1602 return false;
1603
1604 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1605 }
1606
1607 static void fanout_init_data(struct packet_fanout *f)
1608 {
1609 switch (f->type) {
1610 case PACKET_FANOUT_LB:
1611 atomic_set(&f->rr_cur, 0);
1612 break;
1613 case PACKET_FANOUT_CBPF:
1614 case PACKET_FANOUT_EBPF:
1615 RCU_INIT_POINTER(f->bpf_prog, NULL);
1616 break;
1617 }
1618 }
1619
1620 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1621 {
1622 struct bpf_prog *old;
1623
1624 spin_lock(&f->lock);
1625 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1626 rcu_assign_pointer(f->bpf_prog, new);
1627 spin_unlock(&f->lock);
1628
1629 if (old) {
1630 synchronize_net();
1631 bpf_prog_destroy(old);
1632 }
1633 }
1634
1635 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1636 unsigned int len)
1637 {
1638 struct bpf_prog *new;
1639 struct sock_fprog fprog;
1640 int ret;
1641
1642 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1643 return -EPERM;
1644
1645 ret = copy_bpf_fprog_from_user(&fprog, data, len);
1646 if (ret)
1647 return ret;
1648
1649 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1650 if (ret)
1651 return ret;
1652
1653 __fanout_set_data_bpf(po->fanout, new);
1654 return 0;
1655 }
1656
1657 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1658 unsigned int len)
1659 {
1660 struct bpf_prog *new;
1661 u32 fd;
1662
1663 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1664 return -EPERM;
1665 if (len != sizeof(fd))
1666 return -EINVAL;
1667 if (copy_from_sockptr(&fd, data, len))
1668 return -EFAULT;
1669
1670 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1671 if (IS_ERR(new))
1672 return PTR_ERR(new);
1673
1674 __fanout_set_data_bpf(po->fanout, new);
1675 return 0;
1676 }
1677
1678 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1679 unsigned int len)
1680 {
1681 switch (po->fanout->type) {
1682 case PACKET_FANOUT_CBPF:
1683 return fanout_set_data_cbpf(po, data, len);
1684 case PACKET_FANOUT_EBPF:
1685 return fanout_set_data_ebpf(po, data, len);
1686 default:
1687 return -EINVAL;
1688 }
1689 }
1690
1691 static void fanout_release_data(struct packet_fanout *f)
1692 {
1693 switch (f->type) {
1694 case PACKET_FANOUT_CBPF:
1695 case PACKET_FANOUT_EBPF:
1696 __fanout_set_data_bpf(f, NULL);
1697 }
1698 }
1699
1700 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1701 {
1702 struct packet_fanout *f;
1703
1704 list_for_each_entry(f, &fanout_list, list) {
1705 if (f->id == candidate_id &&
1706 read_pnet(&f->net) == sock_net(sk)) {
1707 return false;
1708 }
1709 }
1710 return true;
1711 }
1712
1713 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1714 {
1715 u16 id = fanout_next_id;
1716
1717 do {
1718 if (__fanout_id_is_free(sk, id)) {
1719 *new_id = id;
1720 fanout_next_id = id + 1;
1721 return true;
1722 }
1723
1724 id++;
1725 } while (id != fanout_next_id);
1726
1727 return false;
1728 }
1729
1730 static int fanout_add(struct sock *sk, struct fanout_args *args)
1731 {
1732 struct packet_rollover *rollover = NULL;
1733 struct packet_sock *po = pkt_sk(sk);
1734 u16 type_flags = args->type_flags;
1735 struct packet_fanout *f, *match;
1736 u8 type = type_flags & 0xff;
1737 u8 flags = type_flags >> 8;
1738 u16 id = args->id;
1739 int err;
1740
1741 switch (type) {
1742 case PACKET_FANOUT_ROLLOVER:
1743 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1744 return -EINVAL;
1745 break;
1746 case PACKET_FANOUT_HASH:
1747 case PACKET_FANOUT_LB:
1748 case PACKET_FANOUT_CPU:
1749 case PACKET_FANOUT_RND:
1750 case PACKET_FANOUT_QM:
1751 case PACKET_FANOUT_CBPF:
1752 case PACKET_FANOUT_EBPF:
1753 break;
1754 default:
1755 return -EINVAL;
1756 }
1757
1758 mutex_lock(&fanout_mutex);
1759
1760 err = -EALREADY;
1761 if (po->fanout)
1762 goto out;
1763
1764 if (type == PACKET_FANOUT_ROLLOVER ||
1765 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1766 err = -ENOMEM;
1767 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1768 if (!rollover)
1769 goto out;
1770 atomic_long_set(&rollover->num, 0);
1771 atomic_long_set(&rollover->num_huge, 0);
1772 atomic_long_set(&rollover->num_failed, 0);
1773 }
1774
1775 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1776 if (id != 0) {
1777 err = -EINVAL;
1778 goto out;
1779 }
1780 if (!fanout_find_new_id(sk, &id)) {
1781 err = -ENOMEM;
1782 goto out;
1783 }
1784 /* ephemeral flag for the first socket in the group: drop it */
1785 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1786 }
1787
1788 match = NULL;
1789 list_for_each_entry(f, &fanout_list, list) {
1790 if (f->id == id &&
1791 read_pnet(&f->net) == sock_net(sk)) {
1792 match = f;
1793 break;
1794 }
1795 }
1796 err = -EINVAL;
1797 if (match) {
1798 if (match->flags != flags)
1799 goto out;
1800 if (args->max_num_members &&
1801 args->max_num_members != match->max_num_members)
1802 goto out;
1803 } else {
1804 if (args->max_num_members > PACKET_FANOUT_MAX)
1805 goto out;
1806 if (!args->max_num_members)
1807 /* legacy PACKET_FANOUT_MAX */
1808 args->max_num_members = 256;
1809 err = -ENOMEM;
1810 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1811 GFP_KERNEL);
1812 if (!match)
1813 goto out;
1814 write_pnet(&match->net, sock_net(sk));
1815 match->id = id;
1816 match->type = type;
1817 match->flags = flags;
1818 INIT_LIST_HEAD(&match->list);
1819 spin_lock_init(&match->lock);
1820 refcount_set(&match->sk_ref, 0);
1821 fanout_init_data(match);
1822 match->prot_hook.type = po->prot_hook.type;
1823 match->prot_hook.dev = po->prot_hook.dev;
1824 match->prot_hook.func = packet_rcv_fanout;
1825 match->prot_hook.af_packet_priv = match;
1826 match->prot_hook.af_packet_net = read_pnet(&match->net);
1827 match->prot_hook.id_match = match_fanout_group;
1828 match->max_num_members = args->max_num_members;
1829 match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1830 list_add(&match->list, &fanout_list);
1831 }
1832 err = -EINVAL;
1833
1834 spin_lock(&po->bind_lock);
1835 if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
1836 match->type == type &&
1837 match->prot_hook.type == po->prot_hook.type &&
1838 match->prot_hook.dev == po->prot_hook.dev) {
1839 err = -ENOSPC;
1840 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1841 __dev_remove_pack(&po->prot_hook);
1842
1843 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1844 WRITE_ONCE(po->fanout, match);
1845
1846 po->rollover = rollover;
1847 rollover = NULL;
1848 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1849 __fanout_link(sk, po);
1850 err = 0;
1851 }
1852 }
1853 spin_unlock(&po->bind_lock);
1854
1855 if (err && !refcount_read(&match->sk_ref)) {
1856 list_del(&match->list);
1857 kvfree(match);
1858 }
1859
1860 out:
1861 kfree(rollover);
1862 mutex_unlock(&fanout_mutex);
1863 return err;
1864 }
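
/*
 * Illustrative userspace sketch, not part of this file: joining a fanout
 * group with setsockopt(PACKET_FANOUT), which lands in fanout_add() above.
 * The group id 42 is an arbitrary example and fd is assumed to be an open
 * AF_PACKET socket. In the option word the low 16 bits carry the group id
 * and the high 16 bits carry the fanout mode plus any PACKET_FANOUT_FLAG_*
 * bits; that upper half is what fanout_add() splits into type and flags.
 *
 *	unsigned int fanout_arg = 42 | (PACKET_FANOUT_CPU << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */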
1865
1866 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1867 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1868 * It is the responsibility of the caller to call fanout_release_data() and
1869 * free the returned packet_fanout (after synchronize_net())
1870 */
1871 static struct packet_fanout *fanout_release(struct sock *sk)
1872 {
1873 struct packet_sock *po = pkt_sk(sk);
1874 struct packet_fanout *f;
1875
1876 mutex_lock(&fanout_mutex);
1877 f = po->fanout;
1878 if (f) {
1879 po->fanout = NULL;
1880
1881 if (refcount_dec_and_test(&f->sk_ref))
1882 list_del(&f->list);
1883 else
1884 f = NULL;
1885 }
1886 mutex_unlock(&fanout_mutex);
1887
1888 return f;
1889 }
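
/*
 * Sketch of the caller sequence required by the comment above
 * fanout_release(), mirroring what packet_release() does later in this
 * file: detach under fanout_mutex, wait for in-flight readers, then free.
 *
 *	f = fanout_release(sk);
 *	synchronize_net();
 *	if (f) {
 *		fanout_release_data(f);
 *		kvfree(f);
 *	}
 */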
1890
1891 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1892 struct sk_buff *skb)
1893 {
1894 /* Earlier code assumed this would be a VLAN pkt; double-check
1895 * this now that we have the actual packet in hand. We can only
1896 * do this check on Ethernet devices.
1897 */
1898 if (unlikely(dev->type != ARPHRD_ETHER))
1899 return false;
1900
1901 skb_reset_mac_header(skb);
1902 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1903 }
1904
1905 static const struct proto_ops packet_ops;
1906
1907 static const struct proto_ops packet_ops_spkt;
1908
1909 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1910 struct packet_type *pt, struct net_device *orig_dev)
1911 {
1912 struct sock *sk;
1913 struct sockaddr_pkt *spkt;
1914
1915 /*
1916 * When we registered the protocol we saved the socket in the data
1917 * field for just this event.
1918 */
1919
1920 sk = pt->af_packet_priv;
1921
1922 /*
1923 * Yank back the headers [hope the device set this
1924 * right or kerboom...]
1925 *
1926 * Incoming packets have ll header pulled,
1927 * push it back.
1928 *
1929 * For outgoing ones skb->data == skb_mac_header(skb)
1930 * so that this procedure is noop.
1931 */
1932
1933 if (skb->pkt_type == PACKET_LOOPBACK)
1934 goto out;
1935
1936 if (!net_eq(dev_net(dev), sock_net(sk)))
1937 goto out;
1938
1939 skb = skb_share_check(skb, GFP_ATOMIC);
1940 if (skb == NULL)
1941 goto oom;
1942
1943 /* drop any routing info */
1944 skb_dst_drop(skb);
1945
1946 /* drop conntrack reference */
1947 nf_reset_ct(skb);
1948
1949 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1950
1951 skb_push(skb, skb->data - skb_mac_header(skb));
1952
1953 /*
1954 * The SOCK_PACKET socket receives _all_ frames.
1955 */
1956
1957 spkt->spkt_family = dev->type;
1958 strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1959 spkt->spkt_protocol = skb->protocol;
1960
1961 /*
1962 * Charge the memory to the socket. This is done specifically
1963 * to prevent sockets using all the memory up.
1964 */
1965
1966 if (sock_queue_rcv_skb(sk, skb) == 0)
1967 return 0;
1968
1969 out:
1970 kfree_skb(skb);
1971 oom:
1972 return 0;
1973 }
1974
1975 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1976 {
1977 int depth;
1978
1979 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1980 sock->type == SOCK_RAW) {
1981 skb_reset_mac_header(skb);
1982 skb->protocol = dev_parse_header_protocol(skb);
1983 }
1984
1985 /* Move network header to the right position for VLAN tagged packets */
1986 if (likely(skb->dev->type == ARPHRD_ETHER) &&
1987 eth_type_vlan(skb->protocol) &&
1988 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1989 skb_set_network_header(skb, depth);
1990
1991 skb_probe_transport_header(skb);
1992 }
1993
1994 /*
1995 * Output a raw packet to a device layer. This bypasses all the other
1996 * protocol layers and you must therefore supply it with a complete frame
1997 */
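
/*
 * Illustrative userspace sketch, not part of this file: sending one
 * complete, pre-built frame through a legacy SOCK_PACKET socket, the path
 * serviced by packet_sendmsg_spkt() below. The device name "eth0" and the
 * frame/frame_len buffer are assumptions for the example.
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device) - 1);
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */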
1998
1999 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
2000 size_t len)
2001 {
2002 struct sock *sk = sock->sk;
2003 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
2004 struct sk_buff *skb = NULL;
2005 struct net_device *dev;
2006 struct sockcm_cookie sockc;
2007 __be16 proto = 0;
2008 int err;
2009 int extra_len = 0;
2010
2011 /*
2012 * Get and verify the address.
2013 */
2014
2015 if (saddr) {
2016 if (msg->msg_namelen < sizeof(struct sockaddr))
2017 return -EINVAL;
2018 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
2019 proto = saddr->spkt_protocol;
2020 } else
2021 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
2022
2023 /*
2024 * Find the device first to size check it
2025 */
2026
2027 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
2028 retry:
2029 rcu_read_lock();
2030 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
2031 err = -ENODEV;
2032 if (dev == NULL)
2033 goto out_unlock;
2034
2035 err = -ENETDOWN;
2036 if (!(dev->flags & IFF_UP))
2037 goto out_unlock;
2038
2039 /*
2040 * You may not queue a frame bigger than the mtu. This is the lowest level
2041 * raw protocol and you must do your own fragmentation at this level.
2042 */
2043
2044 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2045 if (!netif_supports_nofcs(dev)) {
2046 err = -EPROTONOSUPPORT;
2047 goto out_unlock;
2048 }
2049 extra_len = 4; /* We're doing our own CRC */
2050 }
2051
2052 err = -EMSGSIZE;
2053 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2054 goto out_unlock;
2055
2056 if (!skb) {
2057 size_t reserved = LL_RESERVED_SPACE(dev);
2058 int tlen = dev->needed_tailroom;
2059 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2060
2061 rcu_read_unlock();
2062 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2063 if (skb == NULL)
2064 return -ENOBUFS;
2065 /* FIXME: Save some space for broken drivers that write a hard
2066 * header at transmission time by themselves. PPP is the notable
2067 * one here. This should really be fixed at the driver level.
2068 */
2069 skb_reserve(skb, reserved);
2070 skb_reset_network_header(skb);
2071
2072 /* Try to align data part correctly */
2073 if (hhlen) {
2074 skb->data -= hhlen;
2075 skb->tail -= hhlen;
2076 if (len < hhlen)
2077 skb_reset_network_header(skb);
2078 }
2079 err = memcpy_from_msg(skb_put(skb, len), msg, len);
2080 if (err)
2081 goto out_free;
2082 goto retry;
2083 }
2084
2085 if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
2086 err = -EINVAL;
2087 goto out_unlock;
2088 }
2089 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2090 !packet_extra_vlan_len_allowed(dev, skb)) {
2091 err = -EMSGSIZE;
2092 goto out_unlock;
2093 }
2094
2095 sockcm_init(&sockc, sk);
2096 if (msg->msg_controllen) {
2097 err = sock_cmsg_send(sk, msg, &sockc);
2098 if (unlikely(err))
2099 goto out_unlock;
2100 }
2101
2102 skb->protocol = proto;
2103 skb->dev = dev;
2104 skb->priority = READ_ONCE(sk->sk_priority);
2105 skb->mark = READ_ONCE(sk->sk_mark);
2106 skb->tstamp = sockc.transmit_time;
2107
2108 skb_setup_tx_timestamp(skb, sockc.tsflags);
2109
2110 if (unlikely(extra_len == 4))
2111 skb->no_fcs = 1;
2112
2113 packet_parse_headers(skb, sock);
2114
2115 dev_queue_xmit(skb);
2116 rcu_read_unlock();
2117 return len;
2118
2119 out_unlock:
2120 rcu_read_unlock();
2121 out_free:
2122 kfree_skb(skb);
2123 return err;
2124 }
2125
2126 static unsigned int run_filter(struct sk_buff *skb,
2127 const struct sock *sk,
2128 unsigned int res)
2129 {
2130 struct sk_filter *filter;
2131
2132 rcu_read_lock();
2133 filter = rcu_dereference(sk->sk_filter);
2134 if (filter != NULL)
2135 res = bpf_prog_run_clear_cb(filter->prog, skb);
2136 rcu_read_unlock();
2137
2138 return res;
2139 }
2140
2141 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2142 size_t *len, int vnet_hdr_sz)
2143 {
2144 struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
2145
2146 if (*len < vnet_hdr_sz)
2147 return -EINVAL;
2148 *len -= vnet_hdr_sz;
2149
2150 if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
2151 return -EINVAL;
2152
2153 return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
2154 }
2155
2156 /*
2157 * This function makes lazy skb cloning in hope that most of packets
2158 * are discarded by BPF.
2159 *
2160 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2161 * and skb->cb are mangled. It works because (and until) packets
2162 * falling here are owned by current CPU. Output packets are cloned
2163 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2164 * sequentially, so that if we return skb to original state on exit,
2165 * we will not harm anyone.
2166 */
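
/*
 * Illustrative userspace sketch, not part of this file: attaching a classic
 * BPF filter with SO_ATTACH_FILTER so that run_filter() above can discard
 * or truncate traffic before it is queued. The single-instruction program
 * below accepts every packet but truncates it to 64 bytes; real filters are
 * usually generated (e.g. by libpcap). fd is an assumed AF_PACKET socket.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 64 },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */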
2167
2168 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2169 struct packet_type *pt, struct net_device *orig_dev)
2170 {
2171 struct sock *sk;
2172 struct sockaddr_ll *sll;
2173 struct packet_sock *po;
2174 u8 *skb_head = skb->data;
2175 int skb_len = skb->len;
2176 unsigned int snaplen, res;
2177 bool is_drop_n_account = false;
2178
2179 if (skb->pkt_type == PACKET_LOOPBACK)
2180 goto drop;
2181
2182 sk = pt->af_packet_priv;
2183 po = pkt_sk(sk);
2184
2185 if (!net_eq(dev_net(dev), sock_net(sk)))
2186 goto drop;
2187
2188 skb->dev = dev;
2189
2190 if (dev_has_header(dev)) {
2191 /* The device has an explicit notion of ll header,
2192 * exported to higher levels.
2193 *
2194 * Otherwise, the device hides details of its frame
2195 * structure, so that corresponding packet head is
2196 * never delivered to user.
2197 */
2198 if (sk->sk_type != SOCK_DGRAM)
2199 skb_push(skb, skb->data - skb_mac_header(skb));
2200 else if (skb->pkt_type == PACKET_OUTGOING) {
2201 /* Special case: outgoing packets have ll header at head */
2202 skb_pull(skb, skb_network_offset(skb));
2203 }
2204 }
2205
2206 snaplen = skb->len;
2207
2208 res = run_filter(skb, sk, snaplen);
2209 if (!res)
2210 goto drop_n_restore;
2211 if (snaplen > res)
2212 snaplen = res;
2213
2214 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2215 goto drop_n_acct;
2216
2217 if (skb_shared(skb)) {
2218 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2219 if (nskb == NULL)
2220 goto drop_n_acct;
2221
2222 if (skb_head != skb->data) {
2223 skb->data = skb_head;
2224 skb->len = skb_len;
2225 }
2226 consume_skb(skb);
2227 skb = nskb;
2228 }
2229
2230 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2231
2232 sll = &PACKET_SKB_CB(skb)->sa.ll;
2233 sll->sll_hatype = dev->type;
2234 sll->sll_pkttype = skb->pkt_type;
2235 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2236 sll->sll_ifindex = orig_dev->ifindex;
2237 else
2238 sll->sll_ifindex = dev->ifindex;
2239
2240 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2241
2242 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2243 * Use their space for storing the original skb length.
2244 */
2245 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2246
2247 if (pskb_trim(skb, snaplen))
2248 goto drop_n_acct;
2249
2250 skb_set_owner_r(skb, sk);
2251 skb->dev = NULL;
2252 skb_dst_drop(skb);
2253
2254 /* drop conntrack reference */
2255 nf_reset_ct(skb);
2256
2257 spin_lock(&sk->sk_receive_queue.lock);
2258 po->stats.stats1.tp_packets++;
2259 sock_skb_set_dropcount(sk, skb);
2260 skb_clear_delivery_time(skb);
2261 __skb_queue_tail(&sk->sk_receive_queue, skb);
2262 spin_unlock(&sk->sk_receive_queue.lock);
2263 sk->sk_data_ready(sk);
2264 return 0;
2265
2266 drop_n_acct:
2267 is_drop_n_account = true;
2268 atomic_inc(&po->tp_drops);
2269 atomic_inc(&sk->sk_drops);
2270
2271 drop_n_restore:
2272 if (skb_head != skb->data && skb_shared(skb)) {
2273 skb->data = skb_head;
2274 skb->len = skb_len;
2275 }
2276 drop:
2277 if (!is_drop_n_account)
2278 consume_skb(skb);
2279 else
2280 kfree_skb(skb);
2281 return 0;
2282 }
2283
2284 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2285 struct packet_type *pt, struct net_device *orig_dev)
2286 {
2287 struct sock *sk;
2288 struct packet_sock *po;
2289 struct sockaddr_ll *sll;
2290 union tpacket_uhdr h;
2291 u8 *skb_head = skb->data;
2292 int skb_len = skb->len;
2293 unsigned int snaplen, res;
2294 unsigned long status = TP_STATUS_USER;
2295 unsigned short macoff, hdrlen;
2296 unsigned int netoff;
2297 struct sk_buff *copy_skb = NULL;
2298 struct timespec64 ts;
2299 __u32 ts_status;
2300 bool is_drop_n_account = false;
2301 unsigned int slot_id = 0;
2302 int vnet_hdr_sz = 0;
2303
2304 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2305 * We may add members to them until the current aligned size is reached,
2306 * without forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2307 */
2308 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2309 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2310
2311 if (skb->pkt_type == PACKET_LOOPBACK)
2312 goto drop;
2313
2314 sk = pt->af_packet_priv;
2315 po = pkt_sk(sk);
2316
2317 if (!net_eq(dev_net(dev), sock_net(sk)))
2318 goto drop;
2319
2320 if (dev_has_header(dev)) {
2321 if (sk->sk_type != SOCK_DGRAM)
2322 skb_push(skb, skb->data - skb_mac_header(skb));
2323 else if (skb->pkt_type == PACKET_OUTGOING) {
2324 /* Special case: outgoing packets have ll header at head */
2325 skb_pull(skb, skb_network_offset(skb));
2326 }
2327 }
2328
2329 snaplen = skb->len;
2330
2331 res = run_filter(skb, sk, snaplen);
2332 if (!res)
2333 goto drop_n_restore;
2334
2335 /* If we are flooded, just give up */
2336 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2337 atomic_inc(&po->tp_drops);
2338 goto drop_n_restore;
2339 }
2340
2341 if (skb->ip_summed == CHECKSUM_PARTIAL)
2342 status |= TP_STATUS_CSUMNOTREADY;
2343 else if (skb->pkt_type != PACKET_OUTGOING &&
2344 skb_csum_unnecessary(skb))
2345 status |= TP_STATUS_CSUM_VALID;
2346 if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2347 status |= TP_STATUS_GSO_TCP;
2348
2349 if (snaplen > res)
2350 snaplen = res;
2351
2352 if (sk->sk_type == SOCK_DGRAM) {
2353 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2354 po->tp_reserve;
2355 } else {
2356 unsigned int maclen = skb_network_offset(skb);
2357 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2358 (maclen < 16 ? 16 : maclen)) +
2359 po->tp_reserve;
2360 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2361 if (vnet_hdr_sz)
2362 netoff += vnet_hdr_sz;
2363 macoff = netoff - maclen;
2364 }
2365 if (netoff > USHRT_MAX) {
2366 atomic_inc(&po->tp_drops);
2367 goto drop_n_restore;
2368 }
2369 if (po->tp_version <= TPACKET_V2) {
2370 if (macoff + snaplen > po->rx_ring.frame_size) {
2371 if (po->copy_thresh &&
2372 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2373 if (skb_shared(skb)) {
2374 copy_skb = skb_clone(skb, GFP_ATOMIC);
2375 } else {
2376 copy_skb = skb_get(skb);
2377 skb_head = skb->data;
2378 }
2379 if (copy_skb) {
2380 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2381 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2382 skb_set_owner_r(copy_skb, sk);
2383 }
2384 }
2385 snaplen = po->rx_ring.frame_size - macoff;
2386 if ((int)snaplen < 0) {
2387 snaplen = 0;
2388 vnet_hdr_sz = 0;
2389 }
2390 }
2391 } else if (unlikely(macoff + snaplen >
2392 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2393 u32 nval;
2394
2395 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2396 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2397 snaplen, nval, macoff);
2398 snaplen = nval;
2399 if (unlikely((int)snaplen < 0)) {
2400 snaplen = 0;
2401 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2402 vnet_hdr_sz = 0;
2403 }
2404 }
2405 spin_lock(&sk->sk_receive_queue.lock);
2406 h.raw = packet_current_rx_frame(po, skb,
2407 TP_STATUS_KERNEL, (macoff+snaplen));
2408 if (!h.raw)
2409 goto drop_n_account;
2410
2411 if (po->tp_version <= TPACKET_V2) {
2412 slot_id = po->rx_ring.head;
2413 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2414 goto drop_n_account;
2415 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2416 }
2417
2418 if (vnet_hdr_sz &&
2419 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2420 sizeof(struct virtio_net_hdr),
2421 vio_le(), true, 0)) {
2422 if (po->tp_version == TPACKET_V3)
2423 prb_clear_blk_fill_status(&po->rx_ring);
2424 goto drop_n_account;
2425 }
2426
2427 if (po->tp_version <= TPACKET_V2) {
2428 packet_increment_rx_head(po, &po->rx_ring);
2429 /*
2430 * LOSING will be reported till you read the stats,
2431 * because it's COR - Clear On Read.
2432 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2433 * at packet level.
2434 */
2435 if (atomic_read(&po->tp_drops))
2436 status |= TP_STATUS_LOSING;
2437 }
2438
2439 po->stats.stats1.tp_packets++;
2440 if (copy_skb) {
2441 status |= TP_STATUS_COPY;
2442 skb_clear_delivery_time(copy_skb);
2443 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2444 }
2445 spin_unlock(&sk->sk_receive_queue.lock);
2446
2447 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2448
2449 /* Always timestamp; prefer an existing software timestamp taken
2450 * closer to the time of capture.
2451 */
2452 ts_status = tpacket_get_timestamp(skb, &ts,
2453 READ_ONCE(po->tp_tstamp) |
2454 SOF_TIMESTAMPING_SOFTWARE);
2455 if (!ts_status)
2456 ktime_get_real_ts64(&ts);
2457
2458 status |= ts_status;
2459
2460 switch (po->tp_version) {
2461 case TPACKET_V1:
2462 h.h1->tp_len = skb->len;
2463 h.h1->tp_snaplen = snaplen;
2464 h.h1->tp_mac = macoff;
2465 h.h1->tp_net = netoff;
2466 h.h1->tp_sec = ts.tv_sec;
2467 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2468 hdrlen = sizeof(*h.h1);
2469 break;
2470 case TPACKET_V2:
2471 h.h2->tp_len = skb->len;
2472 h.h2->tp_snaplen = snaplen;
2473 h.h2->tp_mac = macoff;
2474 h.h2->tp_net = netoff;
2475 h.h2->tp_sec = ts.tv_sec;
2476 h.h2->tp_nsec = ts.tv_nsec;
2477 if (skb_vlan_tag_present(skb)) {
2478 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2479 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2480 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2481 } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
2482 h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
2483 h.h2->tp_vlan_tpid = ntohs(skb->protocol);
2484 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2485 } else {
2486 h.h2->tp_vlan_tci = 0;
2487 h.h2->tp_vlan_tpid = 0;
2488 }
2489 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2490 hdrlen = sizeof(*h.h2);
2491 break;
2492 case TPACKET_V3:
2493 /* tp_next_offset and the vlan fields are already populated above,
2494 * so don't clear those fields here.
2495 */
2496 h.h3->tp_status |= status;
2497 h.h3->tp_len = skb->len;
2498 h.h3->tp_snaplen = snaplen;
2499 h.h3->tp_mac = macoff;
2500 h.h3->tp_net = netoff;
2501 h.h3->tp_sec = ts.tv_sec;
2502 h.h3->tp_nsec = ts.tv_nsec;
2503 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2504 hdrlen = sizeof(*h.h3);
2505 break;
2506 default:
2507 BUG();
2508 }
2509
2510 sll = h.raw + TPACKET_ALIGN(hdrlen);
2511 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2512 sll->sll_family = AF_PACKET;
2513 sll->sll_hatype = dev->type;
2514 sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
2515 vlan_get_protocol_dgram(skb) : skb->protocol;
2516 sll->sll_pkttype = skb->pkt_type;
2517 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2518 sll->sll_ifindex = orig_dev->ifindex;
2519 else
2520 sll->sll_ifindex = dev->ifindex;
2521
2522 smp_mb();
2523
2524 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2525 if (po->tp_version <= TPACKET_V2) {
2526 u8 *start, *end;
2527
2528 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2529 macoff + snaplen);
2530
2531 for (start = h.raw; start < end; start += PAGE_SIZE)
2532 flush_dcache_page(pgv_to_page(start));
2533 }
2534 smp_wmb();
2535 #endif
2536
2537 if (po->tp_version <= TPACKET_V2) {
2538 spin_lock(&sk->sk_receive_queue.lock);
2539 __packet_set_status(po, h.raw, status);
2540 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2541 spin_unlock(&sk->sk_receive_queue.lock);
2542 sk->sk_data_ready(sk);
2543 } else if (po->tp_version == TPACKET_V3) {
2544 prb_clear_blk_fill_status(&po->rx_ring);
2545 }
2546
2547 drop_n_restore:
2548 if (skb_head != skb->data && skb_shared(skb)) {
2549 skb->data = skb_head;
2550 skb->len = skb_len;
2551 }
2552 drop:
2553 if (!is_drop_n_account)
2554 consume_skb(skb);
2555 else
2556 kfree_skb(skb);
2557 return 0;
2558
2559 drop_n_account:
2560 spin_unlock(&sk->sk_receive_queue.lock);
2561 atomic_inc(&po->tp_drops);
2562 is_drop_n_account = true;
2563
2564 sk->sk_data_ready(sk);
2565 kfree_skb(copy_skb);
2566 goto drop_n_restore;
2567 }
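
/*
 * Illustrative userspace sketch, not part of this file: the PACKET_RX_RING
 * that tpacket_rcv() above fills, using the default TPACKET_V1 layout. The
 * ring geometry is an arbitrary example and handle_frame() is a hypothetical
 * consumer. Userspace polls, reads slots whose tp_status has TP_STATUS_USER
 * set (payload starts tp_mac bytes into the slot), and returns each slot by
 * writing TP_STATUS_KERNEL.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */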
2568
2569 static void tpacket_destruct_skb(struct sk_buff *skb)
2570 {
2571 struct packet_sock *po = pkt_sk(skb->sk);
2572
2573 if (likely(po->tx_ring.pg_vec)) {
2574 void *ph;
2575 __u32 ts;
2576
2577 ph = skb_zcopy_get_nouarg(skb);
2578 packet_dec_pending(&po->tx_ring);
2579
2580 ts = __packet_set_timestamp(po, ph, skb);
2581 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2582
2583 complete(&po->skb_completion);
2584 }
2585
2586 sock_wfree(skb);
2587 }
2588
2589 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2590 {
2591 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2592 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2593 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2594 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2595 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2596 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2597 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2598
2599 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2600 return -EINVAL;
2601
2602 return 0;
2603 }
2604
2605 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2606 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
2607 {
2608 int ret;
2609
2610 if (*len < vnet_hdr_sz)
2611 return -EINVAL;
2612 *len -= vnet_hdr_sz;
2613
2614 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2615 return -EFAULT;
2616
2617 ret = __packet_snd_vnet_parse(vnet_hdr, *len);
2618 if (ret)
2619 return ret;
2620
2621 /* move iter to point to the start of mac header */
2622 if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
2623 iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
2624
2625 return 0;
2626 }
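
/*
 * Illustrative userspace sketch, not part of this file: when PACKET_VNET_HDR
 * is enabled, every transmitted packet must be prefixed with a
 * struct virtio_net_hdr, which packet_snd_vnet_parse() above strips and
 * validates. fd, frame and frame_len are assumptions; the header below
 * requests no offloads at all.
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *
 *	struct virtio_net_hdr vh = { .gso_type = VIRTIO_NET_HDR_GSO_NONE };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *		{ .iov_base = frame, .iov_len = frame_len  },
 *	};
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *	sendmsg(fd, &msg, 0);
 */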
2627
2628 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2629 void *frame, struct net_device *dev, void *data, int tp_len,
2630 __be16 proto, unsigned char *addr, int hlen, int copylen,
2631 const struct sockcm_cookie *sockc)
2632 {
2633 union tpacket_uhdr ph;
2634 int to_write, offset, len, nr_frags, len_max;
2635 struct socket *sock = po->sk.sk_socket;
2636 struct page *page;
2637 int err;
2638
2639 ph.raw = frame;
2640
2641 skb->protocol = proto;
2642 skb->dev = dev;
2643 skb->priority = READ_ONCE(po->sk.sk_priority);
2644 skb->mark = READ_ONCE(po->sk.sk_mark);
2645 skb->tstamp = sockc->transmit_time;
2646 skb_setup_tx_timestamp(skb, sockc->tsflags);
2647 skb_zcopy_set_nouarg(skb, ph.raw);
2648
2649 skb_reserve(skb, hlen);
2650 skb_reset_network_header(skb);
2651
2652 to_write = tp_len;
2653
2654 if (sock->type == SOCK_DGRAM) {
2655 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2656 NULL, tp_len);
2657 if (unlikely(err < 0))
2658 return -EINVAL;
2659 } else if (copylen) {
2660 int hdrlen = min_t(int, copylen, tp_len);
2661
2662 skb_push(skb, dev->hard_header_len);
2663 skb_put(skb, copylen - dev->hard_header_len);
2664 err = skb_store_bits(skb, 0, data, hdrlen);
2665 if (unlikely(err))
2666 return err;
2667 if (!dev_validate_header(dev, skb->data, hdrlen))
2668 return -EINVAL;
2669
2670 data += hdrlen;
2671 to_write -= hdrlen;
2672 }
2673
2674 offset = offset_in_page(data);
2675 len_max = PAGE_SIZE - offset;
2676 len = ((to_write > len_max) ? len_max : to_write);
2677
2678 skb->data_len = to_write;
2679 skb->len += to_write;
2680 skb->truesize += to_write;
2681 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2682
2683 while (likely(to_write)) {
2684 nr_frags = skb_shinfo(skb)->nr_frags;
2685
2686 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2687 pr_err("Packet exceed the number of skb frags(%u)\n",
2688 (unsigned int)MAX_SKB_FRAGS);
2689 return -EFAULT;
2690 }
2691
2692 page = pgv_to_page(data);
2693 data += len;
2694 flush_dcache_page(page);
2695 get_page(page);
2696 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2697 to_write -= len;
2698 offset = 0;
2699 len_max = PAGE_SIZE;
2700 len = ((to_write > len_max) ? len_max : to_write);
2701 }
2702
2703 packet_parse_headers(skb, sock);
2704
2705 return tp_len;
2706 }
2707
2708 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2709 int size_max, void **data)
2710 {
2711 union tpacket_uhdr ph;
2712 int tp_len, off;
2713
2714 ph.raw = frame;
2715
2716 switch (po->tp_version) {
2717 case TPACKET_V3:
2718 if (ph.h3->tp_next_offset != 0) {
2719 pr_warn_once("variable sized slot not supported");
2720 return -EINVAL;
2721 }
2722 tp_len = ph.h3->tp_len;
2723 break;
2724 case TPACKET_V2:
2725 tp_len = ph.h2->tp_len;
2726 break;
2727 default:
2728 tp_len = ph.h1->tp_len;
2729 break;
2730 }
2731 if (unlikely(tp_len > size_max)) {
2732 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2733 return -EMSGSIZE;
2734 }
2735
2736 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2737 int off_min, off_max;
2738
2739 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2740 off_max = po->tx_ring.frame_size - tp_len;
2741 if (po->sk.sk_type == SOCK_DGRAM) {
2742 switch (po->tp_version) {
2743 case TPACKET_V3:
2744 off = ph.h3->tp_net;
2745 break;
2746 case TPACKET_V2:
2747 off = ph.h2->tp_net;
2748 break;
2749 default:
2750 off = ph.h1->tp_net;
2751 break;
2752 }
2753 } else {
2754 switch (po->tp_version) {
2755 case TPACKET_V3:
2756 off = ph.h3->tp_mac;
2757 break;
2758 case TPACKET_V2:
2759 off = ph.h2->tp_mac;
2760 break;
2761 default:
2762 off = ph.h1->tp_mac;
2763 break;
2764 }
2765 }
2766 if (unlikely((off < off_min) || (off_max < off)))
2767 return -EINVAL;
2768 } else {
2769 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2770 }
2771
2772 *data = frame + off;
2773 return tp_len;
2774 }
2775
2776 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2777 {
2778 struct sk_buff *skb = NULL;
2779 struct net_device *dev;
2780 struct virtio_net_hdr *vnet_hdr = NULL;
2781 struct sockcm_cookie sockc;
2782 __be16 proto;
2783 int err, reserve = 0;
2784 void *ph;
2785 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2786 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2787 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2788 unsigned char *addr = NULL;
2789 int tp_len, size_max;
2790 void *data;
2791 int len_sum = 0;
2792 int status = TP_STATUS_AVAILABLE;
2793 int hlen, tlen, copylen = 0;
2794 long timeo = 0;
2795
2796 mutex_lock(&po->pg_vec_lock);
2797
2798 /* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
2799 * so we need to confirm it under the protection of pg_vec_lock.
2800 */
2801 if (unlikely(!po->tx_ring.pg_vec)) {
2802 err = -EBUSY;
2803 goto out;
2804 }
2805 if (likely(saddr == NULL)) {
2806 dev = packet_cached_dev_get(po);
2807 proto = READ_ONCE(po->num);
2808 } else {
2809 err = -EINVAL;
2810 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2811 goto out;
2812 if (msg->msg_namelen < (saddr->sll_halen
2813 + offsetof(struct sockaddr_ll,
2814 sll_addr)))
2815 goto out;
2816 proto = saddr->sll_protocol;
2817 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2818 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2819 if (dev && msg->msg_namelen < dev->addr_len +
2820 offsetof(struct sockaddr_ll, sll_addr))
2821 goto out_put;
2822 addr = saddr->sll_addr;
2823 }
2824 }
2825
2826 err = -ENXIO;
2827 if (unlikely(dev == NULL))
2828 goto out;
2829 err = -ENETDOWN;
2830 if (unlikely(!(dev->flags & IFF_UP)))
2831 goto out_put;
2832
2833 sockcm_init(&sockc, &po->sk);
2834 if (msg->msg_controllen) {
2835 err = sock_cmsg_send(&po->sk, msg, &sockc);
2836 if (unlikely(err))
2837 goto out_put;
2838 }
2839
2840 if (po->sk.sk_socket->type == SOCK_RAW)
2841 reserve = dev->hard_header_len;
2842 size_max = po->tx_ring.frame_size
2843 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2844
2845 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
2846 size_max = dev->mtu + reserve + VLAN_HLEN;
2847
2848 reinit_completion(&po->skb_completion);
2849
2850 do {
2851 ph = packet_current_frame(po, &po->tx_ring,
2852 TP_STATUS_SEND_REQUEST);
2853 if (unlikely(ph == NULL)) {
2854 if (need_wait && skb) {
2855 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2856 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2857 if (timeo <= 0) {
2858 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2859 goto out_put;
2860 }
2861 }
2862 /* check for additional frames */
2863 continue;
2864 }
2865
2866 skb = NULL;
2867 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2868 if (tp_len < 0)
2869 goto tpacket_error;
2870
2871 status = TP_STATUS_SEND_REQUEST;
2872 hlen = LL_RESERVED_SPACE(dev);
2873 tlen = dev->needed_tailroom;
2874 if (vnet_hdr_sz) {
2875 vnet_hdr = data;
2876 data += vnet_hdr_sz;
2877 tp_len -= vnet_hdr_sz;
2878 if (tp_len < 0 ||
2879 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2880 tp_len = -EINVAL;
2881 goto tpacket_error;
2882 }
2883 copylen = __virtio16_to_cpu(vio_le(),
2884 vnet_hdr->hdr_len);
2885 }
2886 copylen = max_t(int, copylen, dev->hard_header_len);
2887 skb = sock_alloc_send_skb(&po->sk,
2888 hlen + tlen + sizeof(struct sockaddr_ll) +
2889 (copylen - dev->hard_header_len),
2890 !need_wait, &err);
2891
2892 if (unlikely(skb == NULL)) {
2893 /* we assume the socket was initially writeable ... */
2894 if (likely(len_sum > 0))
2895 err = len_sum;
2896 goto out_status;
2897 }
2898 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2899 addr, hlen, copylen, &sockc);
2900 if (likely(tp_len >= 0) &&
2901 tp_len > dev->mtu + reserve &&
2902 !vnet_hdr_sz &&
2903 !packet_extra_vlan_len_allowed(dev, skb))
2904 tp_len = -EMSGSIZE;
2905
2906 if (unlikely(tp_len < 0)) {
2907 tpacket_error:
2908 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2909 __packet_set_status(po, ph,
2910 TP_STATUS_AVAILABLE);
2911 packet_increment_head(&po->tx_ring);
2912 kfree_skb(skb);
2913 continue;
2914 } else {
2915 status = TP_STATUS_WRONG_FORMAT;
2916 err = tp_len;
2917 goto out_status;
2918 }
2919 }
2920
2921 if (vnet_hdr_sz) {
2922 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2923 tp_len = -EINVAL;
2924 goto tpacket_error;
2925 }
2926 virtio_net_hdr_set_proto(skb, vnet_hdr);
2927 }
2928
2929 skb->destructor = tpacket_destruct_skb;
2930 __packet_set_status(po, ph, TP_STATUS_SENDING);
2931 packet_inc_pending(&po->tx_ring);
2932
2933 status = TP_STATUS_SEND_REQUEST;
2934 err = packet_xmit(po, skb);
2935 if (unlikely(err != 0)) {
2936 if (err > 0)
2937 err = net_xmit_errno(err);
2938 if (err && __packet_get_status(po, ph) ==
2939 TP_STATUS_AVAILABLE) {
2940 /* skb was destructed already */
2941 skb = NULL;
2942 goto out_status;
2943 }
2944 /*
2945 * skb was dropped but not destructed yet;
2946 * let's treat it like congestion or err < 0
2947 */
2948 err = 0;
2949 }
2950 packet_increment_head(&po->tx_ring);
2951 len_sum += tp_len;
2952 } while (likely((ph != NULL) ||
2953 /* Note: packet_read_pending() might be slow if we have
2954 * to call it as it's per_cpu variable, but in fast-path
2955 * we already short-circuit the loop with the first
2956 * condition, and luckily don't have to go that path
2957 * anyway.
2958 */
2959 (need_wait && packet_read_pending(&po->tx_ring))));
2960
2961 err = len_sum;
2962 goto out_put;
2963
2964 out_status:
2965 __packet_set_status(po, ph, status);
2966 kfree_skb(skb);
2967 out_put:
2968 dev_put(dev);
2969 out:
2970 mutex_unlock(&po->pg_vec_lock);
2971 return err;
2972 }
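
/*
 * Illustrative userspace sketch, not part of this file: the PACKET_TX_RING
 * path that tpacket_snd() above consumes, on a socket already bound to an
 * interface and using the default TPACKET_V1 layout. The ring geometry,
 * fd, frame and frame_len are assumptions. Userspace copies the frame to
 * TPACKET_HDRLEN - sizeof(struct sockaddr_ll) bytes into the slot, marks
 * the slot TP_STATUS_SEND_REQUEST and issues a send() to kick the kernel;
 * completed slots come back as TP_STATUS_AVAILABLE.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 4096,
 *		.tp_frame_nr   = 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring;
 *	memcpy(ring + TPACKET_HDRLEN - sizeof(struct sockaddr_ll),
 *	       frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 */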
2973
2974 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2975 size_t reserve, size_t len,
2976 size_t linear, int noblock,
2977 int *err)
2978 {
2979 struct sk_buff *skb;
2980
2981 /* Under a page? Don't bother with paged skb. */
2982 if (prepad + len < PAGE_SIZE || !linear)
2983 linear = len;
2984
2985 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
2986 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
2987 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2988 err, PAGE_ALLOC_COSTLY_ORDER);
2989 if (!skb)
2990 return NULL;
2991
2992 skb_reserve(skb, reserve);
2993 skb_put(skb, linear);
2994 skb->data_len = len - linear;
2995 skb->len += len - linear;
2996
2997 return skb;
2998 }
2999
3000 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3001 {
3002 struct sock *sk = sock->sk;
3003 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
3004 struct sk_buff *skb;
3005 struct net_device *dev;
3006 __be16 proto;
3007 unsigned char *addr = NULL;
3008 int err, reserve = 0;
3009 struct sockcm_cookie sockc;
3010 struct virtio_net_hdr vnet_hdr = { 0 };
3011 int offset = 0;
3012 struct packet_sock *po = pkt_sk(sk);
3013 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
3014 int hlen, tlen, linear;
3015 int extra_len = 0;
3016
3017 /*
3018 * Get and verify the address.
3019 */
3020
3021 if (likely(saddr == NULL)) {
3022 dev = packet_cached_dev_get(po);
3023 proto = READ_ONCE(po->num);
3024 } else {
3025 err = -EINVAL;
3026 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
3027 goto out;
3028 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
3029 goto out;
3030 proto = saddr->sll_protocol;
3031 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3032 if (sock->type == SOCK_DGRAM) {
3033 if (dev && msg->msg_namelen < dev->addr_len +
3034 offsetof(struct sockaddr_ll, sll_addr))
3035 goto out_unlock;
3036 addr = saddr->sll_addr;
3037 }
3038 }
3039
3040 err = -ENXIO;
3041 if (unlikely(dev == NULL))
3042 goto out_unlock;
3043 err = -ENETDOWN;
3044 if (unlikely(!(dev->flags & IFF_UP)))
3045 goto out_unlock;
3046
3047 sockcm_init(&sockc, sk);
3048 sockc.mark = READ_ONCE(sk->sk_mark);
3049 if (msg->msg_controllen) {
3050 err = sock_cmsg_send(sk, msg, &sockc);
3051 if (unlikely(err))
3052 goto out_unlock;
3053 }
3054
3055 if (sock->type == SOCK_RAW)
3056 reserve = dev->hard_header_len;
3057 if (vnet_hdr_sz) {
3058 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
3059 if (err)
3060 goto out_unlock;
3061 }
3062
3063 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3064 if (!netif_supports_nofcs(dev)) {
3065 err = -EPROTONOSUPPORT;
3066 goto out_unlock;
3067 }
3068 extra_len = 4; /* We're doing our own CRC */
3069 }
3070
3071 err = -EMSGSIZE;
3072 if (!vnet_hdr.gso_type &&
3073 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3074 goto out_unlock;
3075
3076 err = -ENOBUFS;
3077 hlen = LL_RESERVED_SPACE(dev);
3078 tlen = dev->needed_tailroom;
3079 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3080 linear = max(linear, min_t(int, len, dev->hard_header_len));
3081 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3082 msg->msg_flags & MSG_DONTWAIT, &err);
3083 if (skb == NULL)
3084 goto out_unlock;
3085
3086 skb_reset_network_header(skb);
3087
3088 err = -EINVAL;
3089 if (sock->type == SOCK_DGRAM) {
3090 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3091 if (unlikely(offset < 0))
3092 goto out_free;
3093 } else if (reserve) {
3094 skb_reserve(skb, -reserve);
3095 if (len < reserve + sizeof(struct ipv6hdr) &&
3096 dev->min_header_len != dev->hard_header_len)
3097 skb_reset_network_header(skb);
3098 }
3099
3100 /* Returns -EFAULT on error */
3101 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3102 if (err)
3103 goto out_free;
3104
3105 if ((sock->type == SOCK_RAW &&
3106 !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3107 err = -EINVAL;
3108 goto out_free;
3109 }
3110
3111 skb_setup_tx_timestamp(skb, sockc.tsflags);
3112
3113 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3114 !packet_extra_vlan_len_allowed(dev, skb)) {
3115 err = -EMSGSIZE;
3116 goto out_free;
3117 }
3118
3119 skb->protocol = proto;
3120 skb->dev = dev;
3121 skb->priority = READ_ONCE(sk->sk_priority);
3122 skb->mark = sockc.mark;
3123 skb->tstamp = sockc.transmit_time;
3124
3125 if (unlikely(extra_len == 4))
3126 skb->no_fcs = 1;
3127
3128 packet_parse_headers(skb, sock);
3129
3130 if (vnet_hdr_sz) {
3131 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3132 if (err)
3133 goto out_free;
3134 len += vnet_hdr_sz;
3135 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3136 }
3137
3138 err = packet_xmit(po, skb);
3139
3140 if (unlikely(err != 0)) {
3141 if (err > 0)
3142 err = net_xmit_errno(err);
3143 if (err)
3144 goto out_unlock;
3145 }
3146
3147 dev_put(dev);
3148
3149 return len;
3150
3151 out_free:
3152 kfree_skb(skb);
3153 out_unlock:
3154 dev_put(dev);
3155 out:
3156 return err;
3157 }
3158
3159 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3160 {
3161 struct sock *sk = sock->sk;
3162 struct packet_sock *po = pkt_sk(sk);
3163
3164 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3165 * tpacket_snd() will redo the check safely.
3166 */
3167 if (data_race(po->tx_ring.pg_vec))
3168 return tpacket_snd(po, msg);
3169
3170 return packet_snd(sock, msg, len);
3171 }
3172
3173 /*
3174 * Close a PACKET socket. This is fairly simple. We immediately go
3175 * to 'closed' state and remove our protocol entry in the device list.
3176 */
3177
3178 static int packet_release(struct socket *sock)
3179 {
3180 struct sock *sk = sock->sk;
3181 struct packet_sock *po;
3182 struct packet_fanout *f;
3183 struct net *net;
3184 union tpacket_req_u req_u;
3185
3186 if (!sk)
3187 return 0;
3188
3189 net = sock_net(sk);
3190 po = pkt_sk(sk);
3191
3192 mutex_lock(&net->packet.sklist_lock);
3193 sk_del_node_init_rcu(sk);
3194 mutex_unlock(&net->packet.sklist_lock);
3195
3196 sock_prot_inuse_add(net, sk->sk_prot, -1);
3197
3198 spin_lock(&po->bind_lock);
3199 unregister_prot_hook(sk, false);
3200 packet_cached_dev_reset(po);
3201
3202 if (po->prot_hook.dev) {
3203 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3204 po->prot_hook.dev = NULL;
3205 }
3206 spin_unlock(&po->bind_lock);
3207
3208 packet_flush_mclist(sk);
3209
3210 lock_sock(sk);
3211 if (po->rx_ring.pg_vec) {
3212 memset(&req_u, 0, sizeof(req_u));
3213 packet_set_ring(sk, &req_u, 1, 0);
3214 }
3215
3216 if (po->tx_ring.pg_vec) {
3217 memset(&req_u, 0, sizeof(req_u));
3218 packet_set_ring(sk, &req_u, 1, 1);
3219 }
3220 release_sock(sk);
3221
3222 f = fanout_release(sk);
3223
3224 synchronize_net();
3225
3226 kfree(po->rollover);
3227 if (f) {
3228 fanout_release_data(f);
3229 kvfree(f);
3230 }
3231 /*
3232 * Now the socket is dead. No more input will appear.
3233 */
3234 sock_orphan(sk);
3235 sock->sk = NULL;
3236
3237 /* Purge queues */
3238
3239 skb_queue_purge(&sk->sk_receive_queue);
3240 packet_free_pending(po);
3241
3242 sock_put(sk);
3243 return 0;
3244 }
3245
3246 /*
3247 * Attach a packet hook.
3248 */
3249
3250 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3251 __be16 proto)
3252 {
3253 struct packet_sock *po = pkt_sk(sk);
3254 struct net_device *dev = NULL;
3255 bool unlisted = false;
3256 bool need_rehook;
3257 int ret = 0;
3258
3259 lock_sock(sk);
3260 spin_lock(&po->bind_lock);
3261 if (!proto)
3262 proto = po->num;
3263
3264 rcu_read_lock();
3265
3266 if (po->fanout) {
3267 ret = -EINVAL;
3268 goto out_unlock;
3269 }
3270
3271 if (name) {
3272 dev = dev_get_by_name_rcu(sock_net(sk), name);
3273 if (!dev) {
3274 ret = -ENODEV;
3275 goto out_unlock;
3276 }
3277 } else if (ifindex) {
3278 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3279 if (!dev) {
3280 ret = -ENODEV;
3281 goto out_unlock;
3282 }
3283 }
3284
3285 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3286
3287 if (need_rehook) {
3288 dev_hold(dev);
3289 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3290 rcu_read_unlock();
3291 /* prevents packet_notifier() from calling
3292 * register_prot_hook()
3293 */
3294 WRITE_ONCE(po->num, 0);
3295 __unregister_prot_hook(sk, true);
3296 rcu_read_lock();
3297 if (dev)
3298 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3299 dev->ifindex);
3300 }
3301
3302 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3303 WRITE_ONCE(po->num, proto);
3304 po->prot_hook.type = proto;
3305
3306 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3307
3308 if (unlikely(unlisted)) {
3309 po->prot_hook.dev = NULL;
3310 WRITE_ONCE(po->ifindex, -1);
3311 packet_cached_dev_reset(po);
3312 } else {
3313 netdev_hold(dev, &po->prot_hook.dev_tracker,
3314 GFP_ATOMIC);
3315 po->prot_hook.dev = dev;
3316 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3317 packet_cached_dev_assign(po, dev);
3318 }
3319 dev_put(dev);
3320 }
3321
3322 if (proto == 0 || !need_rehook)
3323 goto out_unlock;
3324
3325 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3326 register_prot_hook(sk);
3327 } else {
3328 sk->sk_err = ENETDOWN;
3329 if (!sock_flag(sk, SOCK_DEAD))
3330 sk_error_report(sk);
3331 }
3332
3333 out_unlock:
3334 rcu_read_unlock();
3335 spin_unlock(&po->bind_lock);
3336 release_sock(sk);
3337 return ret;
3338 }
3339
3340 /*
3341 * Bind a packet socket to a device
3342 */
3343
3344 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3345 int addr_len)
3346 {
3347 struct sock *sk = sock->sk;
3348 char name[sizeof(uaddr->sa_data_min) + 1];
3349
3350 /*
3351 * Check legality
3352 */
3353
3354 if (addr_len != sizeof(struct sockaddr))
3355 return -EINVAL;
3356 /* uaddr->sa_data comes from userspace and is not guaranteed to be
3357 * zero-terminated.
3358 */
3359 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3360 name[sizeof(uaddr->sa_data_min)] = 0;
3361
3362 return packet_do_bind(sk, name, 0, 0);
3363 }
3364
3365 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3366 {
3367 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3368 struct sock *sk = sock->sk;
3369
3370 /*
3371 * Check legality
3372 */
3373
3374 if (addr_len < sizeof(struct sockaddr_ll))
3375 return -EINVAL;
3376 if (sll->sll_family != AF_PACKET)
3377 return -EINVAL;
3378
3379 return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
3380 }
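
/*
 * Illustrative userspace sketch, not part of this file: binding an AF_PACKET
 * socket to a single interface, which ends up in packet_do_bind() above. The
 * interface name "eth0" is an assumption for the example.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */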
3381
3382 static struct proto packet_proto = {
3383 .name = "PACKET",
3384 .owner = THIS_MODULE,
3385 .obj_size = sizeof(struct packet_sock),
3386 };
3387
3388 /*
3389 * Create a packet socket (SOCK_RAW, SOCK_DGRAM or SOCK_PACKET).
3390 */
3391
3392 static int packet_create(struct net *net, struct socket *sock, int protocol,
3393 int kern)
3394 {
3395 struct sock *sk;
3396 struct packet_sock *po;
3397 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3398 int err;
3399
3400 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3401 return -EPERM;
3402 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3403 sock->type != SOCK_PACKET)
3404 return -ESOCKTNOSUPPORT;
3405
3406 sock->state = SS_UNCONNECTED;
3407
3408 err = -ENOBUFS;
3409 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3410 if (sk == NULL)
3411 goto out;
3412
3413 sock->ops = &packet_ops;
3414 if (sock->type == SOCK_PACKET)
3415 sock->ops = &packet_ops_spkt;
3416
3417 po = pkt_sk(sk);
3418 err = packet_alloc_pending(po);
3419 if (err)
3420 goto out_sk_free;
3421
3422 sock_init_data(sock, sk);
3423
3424 init_completion(&po->skb_completion);
3425 sk->sk_family = PF_PACKET;
3426 po->num = proto;
3427
3428 packet_cached_dev_reset(po);
3429
3430 sk->sk_destruct = packet_sock_destruct;
3431
3432 /*
3433 * Attach a protocol block
3434 */
3435
3436 spin_lock_init(&po->bind_lock);
3437 mutex_init(&po->pg_vec_lock);
3438 po->rollover = NULL;
3439 po->prot_hook.func = packet_rcv;
3440
3441 if (sock->type == SOCK_PACKET)
3442 po->prot_hook.func = packet_rcv_spkt;
3443
3444 po->prot_hook.af_packet_priv = sk;
3445 po->prot_hook.af_packet_net = sock_net(sk);
3446
3447 if (proto) {
3448 po->prot_hook.type = proto;
3449 __register_prot_hook(sk);
3450 }
3451
3452 mutex_lock(&net->packet.sklist_lock);
3453 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3454 mutex_unlock(&net->packet.sklist_lock);
3455
3456 sock_prot_inuse_add(net, &packet_proto, 1);
3457
3458 return 0;
3459 out_sk_free:
3460 sk_free(sk);
3461 out:
3462 return err;
3463 }
3464
3465 /*
3466 * Pull a packet from our receive queue and hand it to the user.
3467 * If necessary we block.
3468 */
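
/*
 * Illustrative userspace sketch, not part of this file: receiving one packet
 * together with the PACKET_AUXDATA control message that packet_recvmsg()
 * below emits when the option is enabled. fd and the receive buffer buf are
 * assumptions for the example.
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *
 *	struct tpacket_auxdata *aux = NULL;
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &msg, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *	     c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA)
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(c);
 */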
3469
3470 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3471 int flags)
3472 {
3473 struct sock *sk = sock->sk;
3474 struct sk_buff *skb;
3475 int copied, err;
3476 int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
3477 unsigned int origlen = 0;
3478
3479 err = -EINVAL;
3480 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3481 goto out;
3482
3483 #if 0
3484 /* What error should we return now? EUNATTACH? */
3485 if (pkt_sk(sk)->ifindex < 0)
3486 return -ENODEV;
3487 #endif
3488
3489 if (flags & MSG_ERRQUEUE) {
3490 err = sock_recv_errqueue(sk, msg, len,
3491 SOL_PACKET, PACKET_TX_TIMESTAMP);
3492 goto out;
3493 }
3494
3495 /*
3496 * Call the generic datagram receiver. This handles all sorts
3497 * of horrible races and re-entrancy so we can forget about it
3498 * in the protocol layers.
3499 *
3500 * Now it will return ENETDOWN if the device has just gone down,
3501 * but then it will block.
3502 */
3503
3504 skb = skb_recv_datagram(sk, flags, &err);
3505
3506 /*
3507 * An error occurred, so return it. Because skb_recv_datagram()
3508 * handles the blocking, we don't need to see or worry about
3509 * blocking retries.
3510 */
3511
3512 if (skb == NULL)
3513 goto out;
3514
3515 packet_rcv_try_clear_pressure(pkt_sk(sk));
3516
3517 if (vnet_hdr_len) {
3518 err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
3519 if (err)
3520 goto out_free;
3521 }
3522
3523 /* You lose any data beyond the buffer you gave. If it worries
3524 * a user program they can ask the device for its MTU
3525 * anyway.
3526 */
3527 copied = skb->len;
3528 if (copied > len) {
3529 copied = len;
3530 msg->msg_flags |= MSG_TRUNC;
3531 }
3532
3533 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3534 if (err)
3535 goto out_free;
3536
3537 if (sock->type != SOCK_PACKET) {
3538 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3539
3540 /* Original length was stored in sockaddr_ll fields */
3541 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3542 sll->sll_family = AF_PACKET;
3543 sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
3544 vlan_get_protocol_dgram(skb) : skb->protocol;
3545 }
3546
3547 sock_recv_cmsgs(msg, sk, skb);
3548
3549 if (msg->msg_name) {
3550 const size_t max_len = min(sizeof(skb->cb),
3551 sizeof(struct sockaddr_storage));
3552 int copy_len;
3553
3554 /* If the address length field is there to be filled
3555 * in, we fill it in now.
3556 */
3557 if (sock->type == SOCK_PACKET) {
3558 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3559 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3560 copy_len = msg->msg_namelen;
3561 } else {
3562 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3563
3564 msg->msg_namelen = sll->sll_halen +
3565 offsetof(struct sockaddr_ll, sll_addr);
3566 copy_len = msg->msg_namelen;
3567 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3568 memset(msg->msg_name +
3569 offsetof(struct sockaddr_ll, sll_addr),
3570 0, sizeof(sll->sll_addr));
3571 msg->msg_namelen = sizeof(struct sockaddr_ll);
3572 }
3573 }
3574 if (WARN_ON_ONCE(copy_len > max_len)) {
3575 copy_len = max_len;
3576 msg->msg_namelen = copy_len;
3577 }
3578 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3579 }
3580
3581 if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3582 struct tpacket_auxdata aux;
3583
3584 aux.tp_status = TP_STATUS_USER;
3585 if (skb->ip_summed == CHECKSUM_PARTIAL)
3586 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3587 else if (skb->pkt_type != PACKET_OUTGOING &&
3588 skb_csum_unnecessary(skb))
3589 aux.tp_status |= TP_STATUS_CSUM_VALID;
3590 if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3591 aux.tp_status |= TP_STATUS_GSO_TCP;
3592
3593 aux.tp_len = origlen;
3594 aux.tp_snaplen = skb->len;
3595 aux.tp_mac = 0;
3596 aux.tp_net = skb_network_offset(skb);
3597 if (skb_vlan_tag_present(skb)) {
3598 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3599 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3600 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3601 } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
3602 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3603 struct net_device *dev;
3604
3605 rcu_read_lock();
3606 dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
3607 if (dev) {
3608 aux.tp_vlan_tci = vlan_get_tci(skb, dev);
3609 aux.tp_vlan_tpid = ntohs(skb->protocol);
3610 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3611 } else {
3612 aux.tp_vlan_tci = 0;
3613 aux.tp_vlan_tpid = 0;
3614 }
3615 rcu_read_unlock();
3616 } else {
3617 aux.tp_vlan_tci = 0;
3618 aux.tp_vlan_tpid = 0;
3619 }
3620 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3621 }
3622
3623 /*
3624 * Free or return the buffer as appropriate. Again this
3625 * hides all the races and re-entrancy issues from us.
3626 */
3627 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3628
3629 out_free:
3630 skb_free_datagram(sk, skb);
3631 out:
3632 return err;
3633 }
3634
3635 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3636 int peer)
3637 {
3638 struct net_device *dev;
3639 struct sock *sk = sock->sk;
3640
3641 if (peer)
3642 return -EOPNOTSUPP;
3643
3644 uaddr->sa_family = AF_PACKET;
3645 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3646 rcu_read_lock();
3647 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3648 if (dev)
3649 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3650 rcu_read_unlock();
3651
3652 return sizeof(*uaddr);
3653 }
3654
3655 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3656 int peer)
3657 {
3658 struct net_device *dev;
3659 struct sock *sk = sock->sk;
3660 struct packet_sock *po = pkt_sk(sk);
3661 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3662 int ifindex;
3663
3664 if (peer)
3665 return -EOPNOTSUPP;
3666
3667 ifindex = READ_ONCE(po->ifindex);
3668 sll->sll_family = AF_PACKET;
3669 sll->sll_ifindex = ifindex;
3670 sll->sll_protocol = READ_ONCE(po->num);
3671 sll->sll_pkttype = 0;
3672 rcu_read_lock();
3673 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3674 if (dev) {
3675 sll->sll_hatype = dev->type;
3676 sll->sll_halen = dev->addr_len;
3677
3678 /* Let __fortify_memcpy_chk() know the actual buffer size. */
3679 memcpy(((struct sockaddr_storage *)sll)->__data +
3680 offsetof(struct sockaddr_ll, sll_addr) -
3681 offsetofend(struct sockaddr_ll, sll_family),
3682 dev->dev_addr, dev->addr_len);
3683 } else {
3684 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3685 sll->sll_halen = 0;
3686 }
3687 rcu_read_unlock();
3688
3689 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3690 }
3691
3692 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3693 int what)
3694 {
3695 switch (i->type) {
3696 case PACKET_MR_MULTICAST:
3697 if (i->alen != dev->addr_len)
3698 return -EINVAL;
3699 if (what > 0)
3700 return dev_mc_add(dev, i->addr);
3701 else
3702 return dev_mc_del(dev, i->addr);
3703 break;
3704 case PACKET_MR_PROMISC:
3705 return dev_set_promiscuity(dev, what);
3706 case PACKET_MR_ALLMULTI:
3707 return dev_set_allmulti(dev, what);
3708 case PACKET_MR_UNICAST:
3709 if (i->alen != dev->addr_len)
3710 return -EINVAL;
3711 if (what > 0)
3712 return dev_uc_add(dev, i->addr);
3713 else
3714 return dev_uc_del(dev, i->addr);
3715 break;
3716 default:
3717 break;
3718 }
3719 return 0;
3720 }
3721
3722 static void packet_dev_mclist_delete(struct net_device *dev,
3723 struct packet_mclist **mlp)
3724 {
3725 struct packet_mclist *ml;
3726
3727 while ((ml = *mlp) != NULL) {
3728 if (ml->ifindex == dev->ifindex) {
3729 packet_dev_mc(dev, ml, -1);
3730 *mlp = ml->next;
3731 kfree(ml);
3732 } else
3733 mlp = &ml->next;
3734 }
3735 }
3736
3737 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3738 {
3739 struct packet_sock *po = pkt_sk(sk);
3740 struct packet_mclist *ml, *i;
3741 struct net_device *dev;
3742 int err;
3743
3744 rtnl_lock();
3745
3746 err = -ENODEV;
3747 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3748 if (!dev)
3749 goto done;
3750
3751 err = -EINVAL;
3752 if (mreq->mr_alen > dev->addr_len)
3753 goto done;
3754
3755 err = -ENOBUFS;
3756 i = kmalloc(sizeof(*i), GFP_KERNEL);
3757 if (i == NULL)
3758 goto done;
3759
3760 err = 0;
3761 for (ml = po->mclist; ml; ml = ml->next) {
3762 if (ml->ifindex == mreq->mr_ifindex &&
3763 ml->type == mreq->mr_type &&
3764 ml->alen == mreq->mr_alen &&
3765 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3766 ml->count++;
3767 /* Free the new element ... */
3768 kfree(i);
3769 goto done;
3770 }
3771 }
3772
3773 i->type = mreq->mr_type;
3774 i->ifindex = mreq->mr_ifindex;
3775 i->alen = mreq->mr_alen;
3776 memcpy(i->addr, mreq->mr_address, i->alen);
3777 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3778 i->count = 1;
3779 i->next = po->mclist;
3780 po->mclist = i;
3781 err = packet_dev_mc(dev, i, 1);
3782 if (err) {
3783 po->mclist = i->next;
3784 kfree(i);
3785 }
3786
3787 done:
3788 rtnl_unlock();
3789 return err;
3790 }
3791
3792 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3793 {
3794 struct packet_mclist *ml, **mlp;
3795
3796 rtnl_lock();
3797
3798 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3799 if (ml->ifindex == mreq->mr_ifindex &&
3800 ml->type == mreq->mr_type &&
3801 ml->alen == mreq->mr_alen &&
3802 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3803 if (--ml->count == 0) {
3804 struct net_device *dev;
3805 *mlp = ml->next;
3806 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3807 if (dev)
3808 packet_dev_mc(dev, ml, -1);
3809 kfree(ml);
3810 }
3811 break;
3812 }
3813 }
3814 rtnl_unlock();
3815 return 0;
3816 }
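/*
 * Illustrative only (not part of the original source): a minimal userspace
 * sketch of the setsockopt() calls that land in packet_mc_add() and
 * packet_mc_drop() above.  "fd" and "ifindex" are hypothetical.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
 */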
3817
3818 static void packet_flush_mclist(struct sock *sk)
3819 {
3820 struct packet_sock *po = pkt_sk(sk);
3821 struct packet_mclist *ml;
3822
3823 if (!po->mclist)
3824 return;
3825
3826 rtnl_lock();
3827 while ((ml = po->mclist) != NULL) {
3828 struct net_device *dev;
3829
3830 po->mclist = ml->next;
3831 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3832 if (dev != NULL)
3833 packet_dev_mc(dev, ml, -1);
3834 kfree(ml);
3835 }
3836 rtnl_unlock();
3837 }
3838
3839 static int
3840 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3841 unsigned int optlen)
3842 {
3843 struct sock *sk = sock->sk;
3844 struct packet_sock *po = pkt_sk(sk);
3845 int ret;
3846
3847 if (level != SOL_PACKET)
3848 return -ENOPROTOOPT;
3849
3850 switch (optname) {
3851 case PACKET_ADD_MEMBERSHIP:
3852 case PACKET_DROP_MEMBERSHIP:
3853 {
3854 struct packet_mreq_max mreq;
3855 int len = optlen;
3856 memset(&mreq, 0, sizeof(mreq));
3857 if (len < sizeof(struct packet_mreq))
3858 return -EINVAL;
3859 if (len > sizeof(mreq))
3860 len = sizeof(mreq);
3861 if (copy_from_sockptr(&mreq, optval, len))
3862 return -EFAULT;
3863 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3864 return -EINVAL;
3865 if (optname == PACKET_ADD_MEMBERSHIP)
3866 ret = packet_mc_add(sk, &mreq);
3867 else
3868 ret = packet_mc_drop(sk, &mreq);
3869 return ret;
3870 }
3871
3872 case PACKET_RX_RING:
3873 case PACKET_TX_RING:
3874 {
3875 union tpacket_req_u req_u;
3876
3877 ret = -EINVAL;
3878 lock_sock(sk);
3879 switch (po->tp_version) {
3880 case TPACKET_V1:
3881 case TPACKET_V2:
3882 if (optlen < sizeof(req_u.req))
3883 break;
3884 ret = copy_from_sockptr(&req_u.req, optval,
3885 sizeof(req_u.req)) ?
3886 -EINVAL : 0;
3887 break;
3888 case TPACKET_V3:
3889 default:
3890 if (optlen < sizeof(req_u.req3))
3891 break;
3892 ret = copy_from_sockptr(&req_u.req3, optval,
3893 sizeof(req_u.req3)) ?
3894 -EINVAL : 0;
3895 break;
3896 }
3897 if (!ret)
3898 ret = packet_set_ring(sk, &req_u, 0,
3899 optname == PACKET_TX_RING);
3900 release_sock(sk);
3901 return ret;
3902 }
3903 case PACKET_COPY_THRESH:
3904 {
3905 int val;
3906
3907 if (optlen != sizeof(val))
3908 return -EINVAL;
3909 if (copy_from_sockptr(&val, optval, sizeof(val)))
3910 return -EFAULT;
3911
3912 pkt_sk(sk)->copy_thresh = val;
3913 return 0;
3914 }
3915 case PACKET_VERSION:
3916 {
3917 int val;
3918
3919 if (optlen != sizeof(val))
3920 return -EINVAL;
3921 if (copy_from_sockptr(&val, optval, sizeof(val)))
3922 return -EFAULT;
3923 switch (val) {
3924 case TPACKET_V1:
3925 case TPACKET_V2:
3926 case TPACKET_V3:
3927 break;
3928 default:
3929 return -EINVAL;
3930 }
3931 lock_sock(sk);
3932 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3933 ret = -EBUSY;
3934 } else {
3935 po->tp_version = val;
3936 ret = 0;
3937 }
3938 release_sock(sk);
3939 return ret;
3940 }
3941 case PACKET_RESERVE:
3942 {
3943 unsigned int val;
3944
3945 if (optlen != sizeof(val))
3946 return -EINVAL;
3947 if (copy_from_sockptr(&val, optval, sizeof(val)))
3948 return -EFAULT;
3949 if (val > INT_MAX)
3950 return -EINVAL;
3951 lock_sock(sk);
3952 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3953 ret = -EBUSY;
3954 } else {
3955 po->tp_reserve = val;
3956 ret = 0;
3957 }
3958 release_sock(sk);
3959 return ret;
3960 }
3961 case PACKET_LOSS:
3962 {
3963 unsigned int val;
3964
3965 if (optlen != sizeof(val))
3966 return -EINVAL;
3967 if (copy_from_sockptr(&val, optval, sizeof(val)))
3968 return -EFAULT;
3969
3970 lock_sock(sk);
3971 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3972 ret = -EBUSY;
3973 } else {
3974 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3975 ret = 0;
3976 }
3977 release_sock(sk);
3978 return ret;
3979 }
3980 case PACKET_AUXDATA:
3981 {
3982 int val;
3983
3984 if (optlen < sizeof(val))
3985 return -EINVAL;
3986 if (copy_from_sockptr(&val, optval, sizeof(val)))
3987 return -EFAULT;
3988
3989 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3990 return 0;
3991 }
3992 case PACKET_ORIGDEV:
3993 {
3994 int val;
3995
3996 if (optlen < sizeof(val))
3997 return -EINVAL;
3998 if (copy_from_sockptr(&val, optval, sizeof(val)))
3999 return -EFAULT;
4000
4001 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
4002 return 0;
4003 }
4004 case PACKET_VNET_HDR:
4005 case PACKET_VNET_HDR_SZ:
4006 {
4007 int val, hdr_len;
4008
4009 if (sock->type != SOCK_RAW)
4010 return -EINVAL;
4011 if (optlen < sizeof(val))
4012 return -EINVAL;
4013 if (copy_from_sockptr(&val, optval, sizeof(val)))
4014 return -EFAULT;
4015
4016 if (optname == PACKET_VNET_HDR_SZ) {
4017 if (val && val != sizeof(struct virtio_net_hdr) &&
4018 val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
4019 return -EINVAL;
4020 hdr_len = val;
4021 } else {
4022 hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
4023 }
4024 lock_sock(sk);
4025 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4026 ret = -EBUSY;
4027 } else {
4028 WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
4029 ret = 0;
4030 }
4031 release_sock(sk);
4032 return ret;
4033 }
4034 case PACKET_TIMESTAMP:
4035 {
4036 int val;
4037
4038 if (optlen != sizeof(val))
4039 return -EINVAL;
4040 if (copy_from_sockptr(&val, optval, sizeof(val)))
4041 return -EFAULT;
4042
4043 WRITE_ONCE(po->tp_tstamp, val);
4044 return 0;
4045 }
4046 case PACKET_FANOUT:
4047 {
4048 struct fanout_args args = { 0 };
4049
4050 if (optlen != sizeof(int) && optlen != sizeof(args))
4051 return -EINVAL;
4052 if (copy_from_sockptr(&args, optval, optlen))
4053 return -EFAULT;
4054
4055 return fanout_add(sk, &args);
4056 }
4057 case PACKET_FANOUT_DATA:
4058 {
4059 /* Paired with the WRITE_ONCE() in fanout_add() */
4060 if (!READ_ONCE(po->fanout))
4061 return -EINVAL;
4062
4063 return fanout_set_data(po, optval, optlen);
4064 }
4065 case PACKET_IGNORE_OUTGOING:
4066 {
4067 int val;
4068
4069 if (optlen != sizeof(val))
4070 return -EINVAL;
4071 if (copy_from_sockptr(&val, optval, sizeof(val)))
4072 return -EFAULT;
4073 if (val < 0 || val > 1)
4074 return -EINVAL;
4075
4076 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4077 return 0;
4078 }
4079 case PACKET_TX_HAS_OFF:
4080 {
4081 unsigned int val;
4082
4083 if (optlen != sizeof(val))
4084 return -EINVAL;
4085 if (copy_from_sockptr(&val, optval, sizeof(val)))
4086 return -EFAULT;
4087
4088 lock_sock(sk);
4089 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4090 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4091
4092 release_sock(sk);
4093 return 0;
4094 }
4095 case PACKET_QDISC_BYPASS:
4096 {
4097 int val;
4098
4099 if (optlen != sizeof(val))
4100 return -EINVAL;
4101 if (copy_from_sockptr(&val, optval, sizeof(val)))
4102 return -EFAULT;
4103
4104 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4105 return 0;
4106 }
4107 default:
4108 return -ENOPROTOOPT;
4109 }
4110 }
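/*
 * Illustrative only (not part of the original source): the usual userspace
 * ring setup that exercises the PACKET_VERSION and PACKET_RX_RING cases
 * above and packet_mmap() below.  The sizes are arbitrary examples;
 * tp_block_size must be page aligned and tp_frame_nr must equal
 * frames-per-block * tp_block_nr, as verified in packet_set_ring().
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 16,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 11,
 *		.tp_frame_nr   = ((1 << 16) / (1 << 11)) * 64,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */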
4111
4112 static int packet_getsockopt(struct socket *sock, int level, int optname,
4113 char __user *optval, int __user *optlen)
4114 {
4115 int len;
4116 int val, lv = sizeof(val);
4117 struct sock *sk = sock->sk;
4118 struct packet_sock *po = pkt_sk(sk);
4119 void *data = &val;
4120 union tpacket_stats_u st;
4121 struct tpacket_rollover_stats rstats;
4122 int drops;
4123
4124 if (level != SOL_PACKET)
4125 return -ENOPROTOOPT;
4126
4127 if (get_user(len, optlen))
4128 return -EFAULT;
4129
4130 if (len < 0)
4131 return -EINVAL;
4132
4133 switch (optname) {
4134 case PACKET_STATISTICS:
4135 spin_lock_bh(&sk->sk_receive_queue.lock);
4136 memcpy(&st, &po->stats, sizeof(st));
4137 memset(&po->stats, 0, sizeof(po->stats));
4138 spin_unlock_bh(&sk->sk_receive_queue.lock);
4139 drops = atomic_xchg(&po->tp_drops, 0);
4140
4141 if (po->tp_version == TPACKET_V3) {
4142 lv = sizeof(struct tpacket_stats_v3);
4143 st.stats3.tp_drops = drops;
4144 st.stats3.tp_packets += drops;
4145 data = &st.stats3;
4146 } else {
4147 lv = sizeof(struct tpacket_stats);
4148 st.stats1.tp_drops = drops;
4149 st.stats1.tp_packets += drops;
4150 data = &st.stats1;
4151 }
4152
4153 break;
4154 case PACKET_AUXDATA:
4155 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4156 break;
4157 case PACKET_ORIGDEV:
4158 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4159 break;
4160 case PACKET_VNET_HDR:
4161 val = !!READ_ONCE(po->vnet_hdr_sz);
4162 break;
4163 case PACKET_VNET_HDR_SZ:
4164 val = READ_ONCE(po->vnet_hdr_sz);
4165 break;
4166 case PACKET_VERSION:
4167 val = po->tp_version;
4168 break;
4169 case PACKET_HDRLEN:
4170 if (len > sizeof(int))
4171 len = sizeof(int);
4172 if (len < sizeof(int))
4173 return -EINVAL;
4174 if (copy_from_user(&val, optval, len))
4175 return -EFAULT;
4176 switch (val) {
4177 case TPACKET_V1:
4178 val = sizeof(struct tpacket_hdr);
4179 break;
4180 case TPACKET_V2:
4181 val = sizeof(struct tpacket2_hdr);
4182 break;
4183 case TPACKET_V3:
4184 val = sizeof(struct tpacket3_hdr);
4185 break;
4186 default:
4187 return -EINVAL;
4188 }
4189 break;
4190 case PACKET_RESERVE:
4191 val = po->tp_reserve;
4192 break;
4193 case PACKET_LOSS:
4194 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4195 break;
4196 case PACKET_TIMESTAMP:
4197 val = READ_ONCE(po->tp_tstamp);
4198 break;
4199 case PACKET_FANOUT:
4200 val = (po->fanout ?
4201 ((u32)po->fanout->id |
4202 ((u32)po->fanout->type << 16) |
4203 ((u32)po->fanout->flags << 24)) :
4204 0);
4205 break;
4206 case PACKET_IGNORE_OUTGOING:
4207 val = READ_ONCE(po->prot_hook.ignore_outgoing);
4208 break;
4209 case PACKET_ROLLOVER_STATS:
4210 if (!po->rollover)
4211 return -EINVAL;
4212 rstats.tp_all = atomic_long_read(&po->rollover->num);
4213 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4214 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4215 data = &rstats;
4216 lv = sizeof(rstats);
4217 break;
4218 case PACKET_TX_HAS_OFF:
4219 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4220 break;
4221 case PACKET_QDISC_BYPASS:
4222 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4223 break;
4224 default:
4225 return -ENOPROTOOPT;
4226 }
4227
4228 if (len > lv)
4229 len = lv;
4230 if (put_user(len, optlen))
4231 return -EFAULT;
4232 if (copy_to_user(optval, data, len))
4233 return -EFAULT;
4234 return 0;
4235 }
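/*
 * Illustrative only (not part of the original source): reading the
 * PACKET_STATISTICS counters handled above for a TPACKET_V1/V2 socket.
 * Note that the counters are reset by the read.
 *
 *	struct tpacket_stats stats;
 *	socklen_t len = sizeof(stats);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &stats, &len) == 0)
 *		printf("packets %u dropped %u\n",
 *		       stats.tp_packets, stats.tp_drops);
 */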
4236
4237 static int packet_notifier(struct notifier_block *this,
4238 unsigned long msg, void *ptr)
4239 {
4240 struct sock *sk;
4241 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4242 struct net *net = dev_net(dev);
4243
4244 rcu_read_lock();
4245 sk_for_each_rcu(sk, &net->packet.sklist) {
4246 struct packet_sock *po = pkt_sk(sk);
4247
4248 switch (msg) {
4249 case NETDEV_UNREGISTER:
4250 if (po->mclist)
4251 packet_dev_mclist_delete(dev, &po->mclist);
4252 fallthrough;
4253
4254 case NETDEV_DOWN:
4255 if (dev->ifindex == po->ifindex) {
4256 spin_lock(&po->bind_lock);
4257 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4258 __unregister_prot_hook(sk, false);
4259 sk->sk_err = ENETDOWN;
4260 if (!sock_flag(sk, SOCK_DEAD))
4261 sk_error_report(sk);
4262 }
4263 if (msg == NETDEV_UNREGISTER) {
4264 packet_cached_dev_reset(po);
4265 WRITE_ONCE(po->ifindex, -1);
4266 netdev_put(po->prot_hook.dev,
4267 &po->prot_hook.dev_tracker);
4268 po->prot_hook.dev = NULL;
4269 }
4270 spin_unlock(&po->bind_lock);
4271 }
4272 break;
4273 case NETDEV_UP:
4274 if (dev->ifindex == po->ifindex) {
4275 spin_lock(&po->bind_lock);
4276 if (po->num)
4277 register_prot_hook(sk);
4278 spin_unlock(&po->bind_lock);
4279 }
4280 break;
4281 }
4282 }
4283 rcu_read_unlock();
4284 return NOTIFY_DONE;
4285 }
4286
4287
4288 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4289 unsigned long arg)
4290 {
4291 struct sock *sk = sock->sk;
4292
4293 switch (cmd) {
4294 case SIOCOUTQ:
4295 {
4296 int amount = sk_wmem_alloc_get(sk);
4297
4298 return put_user(amount, (int __user *)arg);
4299 }
4300 case SIOCINQ:
4301 {
4302 struct sk_buff *skb;
4303 int amount = 0;
4304
4305 spin_lock_bh(&sk->sk_receive_queue.lock);
4306 skb = skb_peek(&sk->sk_receive_queue);
4307 if (skb)
4308 amount = skb->len;
4309 spin_unlock_bh(&sk->sk_receive_queue.lock);
4310 return put_user(amount, (int __user *)arg);
4311 }
4312 #ifdef CONFIG_INET
4313 case SIOCADDRT:
4314 case SIOCDELRT:
4315 case SIOCDARP:
4316 case SIOCGARP:
4317 case SIOCSARP:
4318 case SIOCGIFADDR:
4319 case SIOCSIFADDR:
4320 case SIOCGIFBRDADDR:
4321 case SIOCSIFBRDADDR:
4322 case SIOCGIFNETMASK:
4323 case SIOCSIFNETMASK:
4324 case SIOCGIFDSTADDR:
4325 case SIOCSIFDSTADDR:
4326 case SIOCSIFFLAGS:
4327 return inet_dgram_ops.ioctl(sock, cmd, arg);
4328 #endif
4329
4330 default:
4331 return -ENOIOCTLCMD;
4332 }
4333 return 0;
4334 }
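/*
 * Illustrative only (not part of the original source): for packet sockets
 * SIOCINQ reports the length of the *next* queued packet (not the whole
 * receive queue) and SIOCOUTQ the not-yet-sent transmit memory, as
 * implemented above.
 *
 *	int next_len = 0;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 */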
4335
4336 static __poll_t packet_poll(struct file *file, struct socket *sock,
4337 poll_table *wait)
4338 {
4339 struct sock *sk = sock->sk;
4340 struct packet_sock *po = pkt_sk(sk);
4341 __poll_t mask = datagram_poll(file, sock, wait);
4342
4343 spin_lock_bh(&sk->sk_receive_queue.lock);
4344 if (po->rx_ring.pg_vec) {
4345 if (!packet_previous_rx_frame(po, &po->rx_ring,
4346 TP_STATUS_KERNEL))
4347 mask |= EPOLLIN | EPOLLRDNORM;
4348 }
4349 packet_rcv_try_clear_pressure(po);
4350 spin_unlock_bh(&sk->sk_receive_queue.lock);
4351 spin_lock_bh(&sk->sk_write_queue.lock);
4352 if (po->tx_ring.pg_vec) {
4353 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4354 mask |= EPOLLOUT | EPOLLWRNORM;
4355 }
4356 spin_unlock_bh(&sk->sk_write_queue.lock);
4357 return mask;
4358 }
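/*
 * Illustrative only (not part of the original source): how a hypothetical
 * TPACKET_V2 reader pairs poll() with the ring state reported above.
 * "ring", "req" and the frame index "i" come from the setup sketched after
 * packet_setsockopt(); since that example's block size is an exact multiple
 * of the frame size, frame i simply sits at ring + i * tp_frame_size.
 * After consuming a frame, userspace returns it by writing TP_STATUS_KERNEL.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *				   ((char *)ring + i * req.tp_frame_size);
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */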
4359
4360
4361 /* Dirty? Well, I still haven't learned a better way to account
4362 * for user mmaps.
4363 */
4364
4365 static void packet_mm_open(struct vm_area_struct *vma)
4366 {
4367 struct file *file = vma->vm_file;
4368 struct socket *sock = file->private_data;
4369 struct sock *sk = sock->sk;
4370
4371 if (sk)
4372 atomic_long_inc(&pkt_sk(sk)->mapped);
4373 }
4374
4375 static void packet_mm_close(struct vm_area_struct *vma)
4376 {
4377 struct file *file = vma->vm_file;
4378 struct socket *sock = file->private_data;
4379 struct sock *sk = sock->sk;
4380
4381 if (sk)
4382 atomic_long_dec(&pkt_sk(sk)->mapped);
4383 }
4384
4385 static const struct vm_operations_struct packet_mmap_ops = {
4386 .open = packet_mm_open,
4387 .close = packet_mm_close,
4388 };
4389
4390 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4391 unsigned int len)
4392 {
4393 int i;
4394
4395 for (i = 0; i < len; i++) {
4396 if (likely(pg_vec[i].buffer)) {
4397 if (is_vmalloc_addr(pg_vec[i].buffer))
4398 vfree(pg_vec[i].buffer);
4399 else
4400 free_pages((unsigned long)pg_vec[i].buffer,
4401 order);
4402 pg_vec[i].buffer = NULL;
4403 }
4404 }
4405 kfree(pg_vec);
4406 }
4407
4408 static char *alloc_one_pg_vec_page(unsigned long order)
4409 {
4410 char *buffer;
4411 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4412 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4413
4414 buffer = (char *) __get_free_pages(gfp_flags, order);
4415 if (buffer)
4416 return buffer;
4417
4418 /* __get_free_pages failed, fall back to vmalloc */
4419 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4420 if (buffer)
4421 return buffer;
4422
4423 /* vmalloc failed, let's dig into swap here */
4424 gfp_flags &= ~__GFP_NORETRY;
4425 buffer = (char *) __get_free_pages(gfp_flags, order);
4426 if (buffer)
4427 return buffer;
4428
4429 /* complete and utter failure */
4430 return NULL;
4431 }
4432
4433 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4434 {
4435 unsigned int block_nr = req->tp_block_nr;
4436 struct pgv *pg_vec;
4437 int i;
4438
4439 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4440 if (unlikely(!pg_vec))
4441 goto out;
4442
4443 for (i = 0; i < block_nr; i++) {
4444 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4445 if (unlikely(!pg_vec[i].buffer))
4446 goto out_free_pgvec;
4447 }
4448
4449 out:
4450 return pg_vec;
4451
4452 out_free_pgvec:
4453 free_pg_vec(pg_vec, order, block_nr);
4454 pg_vec = NULL;
4455 goto out;
4456 }
4457
4458 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4459 int closing, int tx_ring)
4460 {
4461 struct pgv *pg_vec = NULL;
4462 struct packet_sock *po = pkt_sk(sk);
4463 unsigned long *rx_owner_map = NULL;
4464 int was_running, order = 0;
4465 struct packet_ring_buffer *rb;
4466 struct sk_buff_head *rb_queue;
4467 __be16 num;
4468 int err;
4469 /* Local alias to minimize code churn */
4470 struct tpacket_req *req = &req_u->req;
4471
4472 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4473 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4474
4475 err = -EBUSY;
4476 if (!closing) {
4477 if (atomic_long_read(&po->mapped))
4478 goto out;
4479 if (packet_read_pending(rb))
4480 goto out;
4481 }
4482
4483 if (req->tp_block_nr) {
4484 unsigned int min_frame_size;
4485
4486 /* Sanity tests and some calculations */
4487 err = -EBUSY;
4488 if (unlikely(rb->pg_vec))
4489 goto out;
4490
4491 switch (po->tp_version) {
4492 case TPACKET_V1:
4493 po->tp_hdrlen = TPACKET_HDRLEN;
4494 break;
4495 case TPACKET_V2:
4496 po->tp_hdrlen = TPACKET2_HDRLEN;
4497 break;
4498 case TPACKET_V3:
4499 po->tp_hdrlen = TPACKET3_HDRLEN;
4500 break;
4501 }
4502
4503 err = -EINVAL;
4504 if (unlikely((int)req->tp_block_size <= 0))
4505 goto out;
4506 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4507 goto out;
4508 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4509 if (po->tp_version >= TPACKET_V3 &&
4510 req->tp_block_size <
4511 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4512 goto out;
4513 if (unlikely(req->tp_frame_size < min_frame_size))
4514 goto out;
4515 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4516 goto out;
4517
4518 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4519 if (unlikely(rb->frames_per_block == 0))
4520 goto out;
4521 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4522 goto out;
4523 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4524 req->tp_frame_nr))
4525 goto out;
4526
4527 err = -ENOMEM;
4528 order = get_order(req->tp_block_size);
4529 pg_vec = alloc_pg_vec(req, order);
4530 if (unlikely(!pg_vec))
4531 goto out;
4532 switch (po->tp_version) {
4533 case TPACKET_V3:
4534 /* Block transmit is not supported yet */
4535 if (!tx_ring) {
4536 init_prb_bdqc(po, rb, pg_vec, req_u);
4537 } else {
4538 struct tpacket_req3 *req3 = &req_u->req3;
4539
4540 if (req3->tp_retire_blk_tov ||
4541 req3->tp_sizeof_priv ||
4542 req3->tp_feature_req_word) {
4543 err = -EINVAL;
4544 goto out_free_pg_vec;
4545 }
4546 }
4547 break;
4548 default:
4549 if (!tx_ring) {
4550 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4551 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4552 if (!rx_owner_map)
4553 goto out_free_pg_vec;
4554 }
4555 break;
4556 }
4557 }
4558 /* Done */
4559 else {
4560 err = -EINVAL;
4561 if (unlikely(req->tp_frame_nr))
4562 goto out;
4563 }
4564
4565
4566 /* Detach socket from network */
4567 spin_lock(&po->bind_lock);
4568 was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4569 num = po->num;
4570 if (was_running) {
4571 WRITE_ONCE(po->num, 0);
4572 __unregister_prot_hook(sk, false);
4573 }
4574 spin_unlock(&po->bind_lock);
4575
4576 synchronize_net();
4577
4578 err = -EBUSY;
4579 mutex_lock(&po->pg_vec_lock);
4580 if (closing || atomic_long_read(&po->mapped) == 0) {
4581 err = 0;
4582 spin_lock_bh(&rb_queue->lock);
4583 swap(rb->pg_vec, pg_vec);
4584 if (po->tp_version <= TPACKET_V2)
4585 swap(rb->rx_owner_map, rx_owner_map);
4586 rb->frame_max = (req->tp_frame_nr - 1);
4587 rb->head = 0;
4588 rb->frame_size = req->tp_frame_size;
4589 spin_unlock_bh(&rb_queue->lock);
4590
4591 swap(rb->pg_vec_order, order);
4592 swap(rb->pg_vec_len, req->tp_block_nr);
4593
4594 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4595 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4596 tpacket_rcv : packet_rcv;
4597 skb_queue_purge(rb_queue);
4598 if (atomic_long_read(&po->mapped))
4599 pr_err("packet_mmap: vma is busy: %ld\n",
4600 atomic_long_read(&po->mapped));
4601 }
4602 mutex_unlock(&po->pg_vec_lock);
4603
4604 spin_lock(&po->bind_lock);
4605 if (was_running) {
4606 WRITE_ONCE(po->num, num);
4607 register_prot_hook(sk);
4608 }
4609 spin_unlock(&po->bind_lock);
4610 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4611 /* Because we don't support block-based V3 on tx-ring */
4612 if (!tx_ring)
4613 prb_shutdown_retire_blk_timer(po, rb_queue);
4614 }
4615
4616 out_free_pg_vec:
4617 if (pg_vec) {
4618 bitmap_free(rx_owner_map);
4619 free_pg_vec(pg_vec, order, req->tp_block_nr);
4620 }
4621 out:
4622 return err;
4623 }
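/*
 * Illustrative only (not part of the original source): once the mapping has
 * been munmap()ed, userspace tears a ring down by repeating the setsockopt()
 * with an all-zero request (tp_block_nr == 0 requires tp_frame_nr == 0, per
 * the check above), which makes packet_set_ring() swap in a NULL pg_vec and
 * free the old one.
 *
 *	struct tpacket_req req = { 0 };
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */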
4624
4625 static int packet_mmap(struct file *file, struct socket *sock,
4626 struct vm_area_struct *vma)
4627 {
4628 struct sock *sk = sock->sk;
4629 struct packet_sock *po = pkt_sk(sk);
4630 unsigned long size, expected_size;
4631 struct packet_ring_buffer *rb;
4632 unsigned long start;
4633 int err = -EINVAL;
4634 int i;
4635
4636 if (vma->vm_pgoff)
4637 return -EINVAL;
4638
4639 mutex_lock(&po->pg_vec_lock);
4640
4641 expected_size = 0;
4642 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4643 if (rb->pg_vec) {
4644 expected_size += rb->pg_vec_len
4645 * rb->pg_vec_pages
4646 * PAGE_SIZE;
4647 }
4648 }
4649
4650 if (expected_size == 0)
4651 goto out;
4652
4653 size = vma->vm_end - vma->vm_start;
4654 if (size != expected_size)
4655 goto out;
4656
4657 start = vma->vm_start;
4658 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4659 if (rb->pg_vec == NULL)
4660 continue;
4661
4662 for (i = 0; i < rb->pg_vec_len; i++) {
4663 struct page *page;
4664 void *kaddr = rb->pg_vec[i].buffer;
4665 int pg_num;
4666
4667 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4668 page = pgv_to_page(kaddr);
4669 err = vm_insert_page(vma, start, page);
4670 if (unlikely(err))
4671 goto out;
4672 start += PAGE_SIZE;
4673 kaddr += PAGE_SIZE;
4674 }
4675 }
4676 }
4677
4678 atomic_long_inc(&po->mapped);
4679 vma->vm_ops = &packet_mmap_ops;
4680 err = 0;
4681
4682 out:
4683 mutex_unlock(&po->pg_vec_lock);
4684 return err;
4685 }
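/*
 * Note (not part of the original source): the single mmap() accepted above
 * must start at offset 0 and its length must equal the RX ring size plus the
 * TX ring size, mapped back to back.  With both rings configured, a
 * hypothetical caller would do:
 *
 *	size_t rx_sz = (size_t)rx_req.tp_block_size * rx_req.tp_block_nr;
 *	size_t tx_sz = (size_t)tx_req.tp_block_size * tx_req.tp_block_nr;
 *	void *ring = mmap(NULL, rx_sz + tx_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */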
4686
4687 static const struct proto_ops packet_ops_spkt = {
4688 .family = PF_PACKET,
4689 .owner = THIS_MODULE,
4690 .release = packet_release,
4691 .bind = packet_bind_spkt,
4692 .connect = sock_no_connect,
4693 .socketpair = sock_no_socketpair,
4694 .accept = sock_no_accept,
4695 .getname = packet_getname_spkt,
4696 .poll = datagram_poll,
4697 .ioctl = packet_ioctl,
4698 .gettstamp = sock_gettstamp,
4699 .listen = sock_no_listen,
4700 .shutdown = sock_no_shutdown,
4701 .sendmsg = packet_sendmsg_spkt,
4702 .recvmsg = packet_recvmsg,
4703 .mmap = sock_no_mmap,
4704 };
4705
4706 static const struct proto_ops packet_ops = {
4707 .family = PF_PACKET,
4708 .owner = THIS_MODULE,
4709 .release = packet_release,
4710 .bind = packet_bind,
4711 .connect = sock_no_connect,
4712 .socketpair = sock_no_socketpair,
4713 .accept = sock_no_accept,
4714 .getname = packet_getname,
4715 .poll = packet_poll,
4716 .ioctl = packet_ioctl,
4717 .gettstamp = sock_gettstamp,
4718 .listen = sock_no_listen,
4719 .shutdown = sock_no_shutdown,
4720 .setsockopt = packet_setsockopt,
4721 .getsockopt = packet_getsockopt,
4722 .sendmsg = packet_sendmsg,
4723 .recvmsg = packet_recvmsg,
4724 .mmap = packet_mmap,
4725 };
4726
4727 static const struct net_proto_family packet_family_ops = {
4728 .family = PF_PACKET,
4729 .create = packet_create,
4730 .owner = THIS_MODULE,
4731 };
4732
4733 static struct notifier_block packet_netdev_notifier = {
4734 .notifier_call = packet_notifier,
4735 };
4736
4737 #ifdef CONFIG_PROC_FS
4738
4739 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4740 __acquires(RCU)
4741 {
4742 struct net *net = seq_file_net(seq);
4743
4744 rcu_read_lock();
4745 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4746 }
4747
4748 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4749 {
4750 struct net *net = seq_file_net(seq);
4751 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4752 }
4753
4754 static void packet_seq_stop(struct seq_file *seq, void *v)
4755 __releases(RCU)
4756 {
4757 rcu_read_unlock();
4758 }
4759
4760 static int packet_seq_show(struct seq_file *seq, void *v)
4761 {
4762 if (v == SEQ_START_TOKEN)
4763 seq_printf(seq,
4764 "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
4765 IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4766 else {
4767 struct sock *s = sk_entry(v);
4768 const struct packet_sock *po = pkt_sk(s);
4769
4770 seq_printf(seq,
4771 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4772 s,
4773 refcount_read(&s->sk_refcnt),
4774 s->sk_type,
4775 ntohs(READ_ONCE(po->num)),
4776 READ_ONCE(po->ifindex),
4777 packet_sock_flag(po, PACKET_SOCK_RUNNING),
4778 atomic_read(&s->sk_rmem_alloc),
4779 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4780 sock_i_ino(s));
4781 }
4782
4783 return 0;
4784 }
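/*
 * Illustrative only (not part of the original source): with the format used
 * above, a hypothetical /proc/net/packet line for an ETH_P_ALL SOCK_RAW
 * socket bound to ifindex 2 might look like:
 *
 *	sk               RefCnt Type Proto Iface R Rmem   User   Inode
 *	0000000000000000 3      3    0003  2     1 0      1000   17892
 */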
4785
4786 static const struct seq_operations packet_seq_ops = {
4787 .start = packet_seq_start,
4788 .next = packet_seq_next,
4789 .stop = packet_seq_stop,
4790 .show = packet_seq_show,
4791 };
4792 #endif
4793
4794 static int __net_init packet_net_init(struct net *net)
4795 {
4796 mutex_init(&net->packet.sklist_lock);
4797 INIT_HLIST_HEAD(&net->packet.sklist);
4798
4799 #ifdef CONFIG_PROC_FS
4800 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4801 sizeof(struct seq_net_private)))
4802 return -ENOMEM;
4803 #endif /* CONFIG_PROC_FS */
4804
4805 return 0;
4806 }
4807
4808 static void __net_exit packet_net_exit(struct net *net)
4809 {
4810 remove_proc_entry("packet", net->proc_net);
4811 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4812 }
4813
4814 static struct pernet_operations packet_net_ops = {
4815 .init = packet_net_init,
4816 .exit = packet_net_exit,
4817 };
4818
4819
4820 static void __exit packet_exit(void)
4821 {
4822 sock_unregister(PF_PACKET);
4823 proto_unregister(&packet_proto);
4824 unregister_netdevice_notifier(&packet_netdev_notifier);
4825 unregister_pernet_subsys(&packet_net_ops);
4826 }
4827
4828 static int __init packet_init(void)
4829 {
4830 int rc;
4831
4832 rc = register_pernet_subsys(&packet_net_ops);
4833 if (rc)
4834 goto out;
4835 rc = register_netdevice_notifier(&packet_netdev_notifier);
4836 if (rc)
4837 goto out_pernet;
4838 rc = proto_register(&packet_proto, 0);
4839 if (rc)
4840 goto out_notifier;
4841 rc = sock_register(&packet_family_ops);
4842 if (rc)
4843 goto out_proto;
4844
4845 return 0;
4846
4847 out_proto:
4848 proto_unregister(&packet_proto);
4849 out_notifier:
4850 unregister_netdevice_notifier(&packet_netdev_notifier);
4851 out_pernet:
4852 unregister_pernet_subsys(&packet_net_ops);
4853 out:
4854 return rc;
4855 }
4856
4857 module_init(packet_init);
4858 module_exit(packet_exit);
4859 MODULE_LICENSE("GPL");
4860 MODULE_ALIAS_NETPROTO(PF_PACKET);
4861