xref: /openbmc/linux/drivers/net/wireguard/queueing.h (revision 55fd7e02)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

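/* Illustrative sketch, not part of this header: a device-level queue is
 * assumed to be initialized with a worker function, a multicore flag, and
 * a fixed ring length, along the lines of
 *
 *	ret = wg_packet_queue_init(&wg->encrypt_queue,
 *				   wg_packet_encrypt_worker, true, 1024);
 *	if (ret < 0)
 *		goto err;
 *	...
 *	wg_packet_queue_free(&wg->encrypt_queue, true);
 *
 * The encrypt_queue field and the length of 1024 are assumptions for
 * illustration; the real call sites are in device.c.
 */
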
/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

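/* struct packet_cb is overlaid onto skb->cb, which is 48 bytes, so it must
 * stay at or below that size. A hedged, illustrative compile-time check
 * (not present in the original source) could read:
 *
 *	BUILD_BUG_ON(sizeof(struct packet_cb) >
 *		     sizeof_field(struct sk_buff, cb));
 */
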
/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
{
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct iphdr)) <=
		    skb_tail_pointer(skb) &&
	    ip_hdr(skb)->version == 4)
		return htons(ETH_P_IP);
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
		    skb_tail_pointer(skb) &&
	    ipv6_hdr(skb)->version == 6)
		return htons(ETH_P_IPV6);
	return 0;
}

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = wg_examine_packet_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

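/* Assumed usage, for orientation: wg_examine_packet_protocol() is used on
 * the receive path to set skb->protocol on freshly decrypted packets,
 * while wg_check_packet_protocol() is used on the transmit path to verify
 * that the cached skb->protocol agrees with the bytes actually present in
 * the packet.
 */
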
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
		       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

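/* wg_reset_packet() scrubs all per-packet metadata between the
 * headers_start and headers_end markers of struct sk_buff, so no stale
 * state leaks between the tunnel and the underlying network path. When
 * encapsulating, the flow hash is preserved so the encrypted packet
 * inherits the steering of the inner flow it carries.
 */
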
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

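/* Worked example: with CPUs {0, 2, 3} online and id == 4, cpu_index is
 * 4 % 3 == 1, so the loop advances from CPU 0 to CPU 2, which is returned
 * and cached in *stored_cpu for subsequent calls.
 */
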
/* This function is racy, in the sense that next is unlocked, so it could
 * return the same CPU twice. A race-free version of this would be to
 * instead store an atomic sequence number, do an increment-and-return,
 * and then iterate through every possible CPU until we get to that index,
 * as wg_cpumask_choose_online() above does. However, that is a bit
 * slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}

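/* Worked example: with CPUs {0, 2, 3} online and *next == 1, the while
 * loop advances to CPU 2 (the next online CPU after 1); the function
 * returns 2 and stores 3 in *next. When cpumask_next() runs past the last
 * set bit it returns nr_cpumask_bits, which the modulo wraps back to 0.
 */
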
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeuing it.
	 */
	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
		return -ENOSPC;
	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

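/* A return of -ENOSPC means the per-peer ring was full and the packet was
 * not queued anywhere; -EPIPE means the packet already sits in the peer
 * ring but the device ring was full, so callers (e.g. in send.c) are
 * expected to mark it PACKET_STATE_DEAD and let the serialized per-peer
 * consumer free it.
 */
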
static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
					     struct sk_buff *skb,
					     enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
					       peer->internal_id),
		      peer->device->packet_crypt_wq, &queue->work);
	wg_peer_put(peer);
}

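/* The release semantics of atomic_set_release() pair with an acquire load
 * of PACKET_CB(skb)->state in the per-peer consumer (e.g.
 * wg_packet_tx_worker()), ensuring the processed packet contents are
 * visible before the state transition is observed.
 */
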
static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
						  enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

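/* The NAPI variant kicks peer->napi instead of a workqueue; its poll
 * function is wg_packet_rx_poll(), declared above, which drains the
 * peer's receive queue within the NAPI budget.
 */
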
#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */