/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

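/* Each packet's crypto state is tracked in its skb control block (struct
 * packet_cb below): it is queued as UNCRYPTED, and the encrypt/decrypt
 * workers later mark it CRYPTED on success or DEAD on failure, which the
 * per-peer consumers wait for before transmitting or delivering it.
 */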
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

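/* Returns true only when the protocol parsed from the packet's actual IP
 * header matches the protocol recorded in skb->protocol, i.e. the skb really
 * is the IPv4 or IPv6 packet it claims to be.
 */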
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}

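/* Scrubs the skb of nearly all of its metadata and resets its header offsets,
 * so that a packet crossing the tunnel boundary carries no stale state. When
 * encapsulating, the flow hash fields are preserved so that the outer flow
 * inherits the inner flow's steering.
 */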
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

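/* Returns a consistent CPU for a given object: the cached *stored_cpu is
 * reused while it remains online; otherwise a replacement is picked by
 * indexing the online mask with id modulo the number of online CPUs, and is
 * cached for next time.
 */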
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu >= nr_cpu_ids ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing are
 * harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	*last_cpu = cpu;
	return cpu;
}

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}
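
/* The single consumer typically drives the peek/drop pair in a loop, dropping
 * the peeked skb only once it has actually been consumed. A minimal sketch of
 * that pattern (the handle_packet() name is illustrative, not a function from
 * this driver):
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = wg_prev_queue_peek(queue)) != NULL) {
 *		if (atomic_read_acquire(&PACKET_CB(skb)->state) ==
 *		    PACKET_STATE_UNCRYPTED)
 *			break;
 *		wg_prev_queue_drop_peeked(queue);
 *		handle_packet(skb);
 *	}
 */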

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * consuming it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
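
/* Note on the two failure modes (a reading of this function, not of its call
 * sites): -ENOSPC means the skb never entered either queue and may simply be
 * dropped, whereas -EPIPE means it is already sitting in the peer queue, so
 * the caller must instead mark it PACKET_STATE_DEAD for the per-peer consumer
 * to reap.
 */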

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */