xref: /openbmc/linux/net/packet/internal.h (revision 612a462a)
1 #ifndef __PACKET_INTERNAL_H__
2 #define __PACKET_INTERNAL_H__
3 
4 #include <linux/refcount.h>
5 
/*
 * One multicast/promiscuous membership held by a packet socket,
 * presumably one entry per PACKET_ADD_MEMBERSHIP request — confirm
 * against af_packet.c.
 */
struct packet_mclist {
	struct packet_mclist	*next;		/* next entry in the socket's singly-linked list */
	int			ifindex;	/* device this membership is bound to */
	int			count;		/* reference count for duplicate adds */
	unsigned short		type;		/* membership type */
	unsigned short		alen;		/* number of valid bytes in addr[] */
	unsigned char		addr[MAX_ADDR_LEN];	/* hardware address of the membership */
};
14 
15 /* kbdq - kernel block descriptor queue */
16 struct tpacket_kbdq_core {
17 	struct pgv	*pkbdq;
18 	unsigned int	feature_req_word;
19 	unsigned int	hdrlen;
20 	unsigned char	reset_pending_on_curr_blk;
21 	unsigned char   delete_blk_timer;
22 	unsigned short	kactive_blk_num;
23 	unsigned short	blk_sizeof_priv;
24 
25 	/* last_kactive_blk_num:
26 	 * trick to see if user-space has caught up
27 	 * in order to avoid refreshing timer when every single pkt arrives.
28 	 */
29 	unsigned short	last_kactive_blk_num;
30 
31 	char		*pkblk_start;
32 	char		*pkblk_end;
33 	int		kblk_size;
34 	unsigned int	max_frame_len;
35 	unsigned int	knum_blocks;
36 	uint64_t	knxt_seq_num;
37 	char		*prev;
38 	char		*nxt_offset;
39 	struct sk_buff	*skb;
40 
41 	atomic_t	blk_fill_in_prog;
42 
43 	/* Default is set to 8ms */
44 #define DEFAULT_PRB_RETIRE_TOV	(8)
45 
46 	unsigned short  retire_blk_tov;
47 	unsigned short  version;
48 	unsigned long	tov_in_jiffies;
49 
50 	/* timer to retire an outstanding block */
51 	struct timer_list retire_blk_timer;
52 };
53 
/* One entry of a ring's page vector: a pointer to one block's buffer. */
struct pgv {
	char *buffer;
};
57 
/*
 * One mmap()ed ring (rx or tx) of a packet socket: a vector of page
 * blocks, each holding a fixed number of fixed-size frame slots.
 */
struct packet_ring_buffer {
	struct pgv		*pg_vec;	/* vector of blocks backing the ring */

	unsigned int		head;		/* next frame slot to use */
	unsigned int		frames_per_block;
	unsigned int		frame_size;	/* bytes per frame slot */
	unsigned int		frame_max;	/* highest valid frame index */

	unsigned int		pg_vec_order;	/* page allocation order per block */
	unsigned int		pg_vec_pages;	/* pages per block */
	unsigned int		pg_vec_len;	/* number of blocks in pg_vec */

	/* per-CPU count of frames handed out and not yet released —
	 * NOTE(review): presumably used by the tx path; confirm in af_packet.c */
	unsigned int __percpu	*pending_refcnt;

	struct tpacket_kbdq_core	prb_bdqc;	/* block-queue state (see above) */
};
74 
/* Protects fanout group creation/membership (defined in af_packet.c — verify). */
extern struct mutex fanout_mutex;
/* Upper bound on sockets in one fanout group; sizes packet_fanout::arr[]. */
#define PACKET_FANOUT_MAX	256
77 
/*
 * A fanout group: a set of packet sockets sharing one prot_hook, so that
 * matching packets are distributed among the members. Field notes below
 * are reviewer annotations inferred from names — confirm in af_packet.c.
 */
struct packet_fanout {
	possible_net_t		net;		/* owning network namespace */
	unsigned int		num_members;	/* live entries in arr[] */
	u16			id;		/* fanout group id */
	u8			type;		/* distribution mode */
	u8			flags;
	union {
		atomic_t		rr_cur;		/* rotor, presumably for round-robin mode */
		struct bpf_prog __rcu	*bpf_prog;	/* selector program for BPF modes */
	};
	struct list_head	list;		/* link in the global fanout list */
	struct sock		*arr[PACKET_FANOUT_MAX];	/* member sockets */
	spinlock_t		lock;		/* protects arr[]/num_members */
	refcount_t		sk_ref;		/* one reference per member socket */
	/* shared rx hook; own cache line to keep hot-path data apart */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
94 
/*
 * Per-socket rollover state and statistics (fanout rollover).
 * NOTE(review): counter semantics inferred from names — confirm in af_packet.c.
 */
struct packet_rollover {
	int			sock;		/* index of the last socket rolled over to */
	struct rcu_head		rcu;		/* for deferred (RCU) freeing */
	atomic_long_t		num;		/* rollover events */
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;	/* rollovers that found no room */
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	/* small recent-choice history, sized to exactly one cache line */
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;
104 
/*
 * Private state of an AF_PACKET socket, obtained from a struct sock via
 * pkt_sk(). Field notes are reviewer annotations; the sockopt mappings
 * are inferred from names — confirm in af_packet.c.
 */
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;	/* fanout group, if joined */
	union  tpacket_stats_u	stats;		/* packet/drop counters */
	struct packet_ring_buffer	rx_ring;	/* mmap()ed receive ring */
	struct packet_ring_buffer	tx_ring;	/* mmap()ed transmit ring */
	int			copy_thresh;
	spinlock_t		bind_lock;	/* protects binding / prot_hook attach */
	struct mutex		pg_vec_lock;	/* serializes ring (re)configuration */
	unsigned int		running:1,	/* prot_hook is attached*/
				auxdata:1,	/* presumably PACKET_AUXDATA */
				origdev:1,	/* presumably PACKET_ORIGDEV */
				has_vnet_hdr:1;	/* presumably PACKET_VNET_HDR */
	int			pressure;	/* set when the rx ring filled up */
	int			ifindex;	/* bound device		*/
	__be16			num;		/* bound protocol number (network order) */
	struct packet_rollover	*rollover;	/* rollover state, if enabled */
	struct packet_mclist	*mclist;	/* device membership list */
	atomic_t		mapped;		/* ring pages currently mmap()ed */
	enum tpacket_versions	tp_version;	/* ring frame format version */
	unsigned int		tp_hdrlen;	/* per-version tpacket header length */
	unsigned int		tp_reserve;	/* user-reserved headroom per frame */
	unsigned int		tp_loss:1;
	unsigned int		tp_tx_has_off:1;
	unsigned int		tp_tstamp;	/* timestamping flags */
	struct net_device __rcu	*cached_dev;	/* cached pointer to the bound device */
	int			(*xmit)(struct sk_buff *skb);	/* transmit function */
	/* rx hook; own cache line to avoid false sharing with the above */
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
135 
/*
 * pkt_sk - convert a generic struct sock to its containing packet_sock.
 *
 * The cast is valid only because struct sock is required to be the first
 * member of struct packet_sock (see the comment on that struct), so both
 * start at the same address.
 *
 * Marked inline: a plain static function defined in a header triggers
 * -Wunused-function (and a duplicate copy) in every translation unit
 * that includes this header without calling it.
 */
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
140 
141 #endif
142