// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
					const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	/* siphash gives us a secure 64-bit number based on a random key. Since
	 * the bits are uniformly distributed, we can then mask off to get the
	 * bits we need.
	 */
	const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
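
/* A worked sketch of the masking step above, using made-up numbers; the real
 * bucket count is declared in peerlookup.h and is only assumed here:
 *
 *	hash                     = 0xdeadbeefcafef00d
 *	HASH_SIZE(hashtable) - 1 = 0x7ff	(assuming 2^11 buckets)
 *	hash & 0x7ff             = 0x00d	(the chosen bucket)
 *
 * Because the siphash key is random and never leaves the table, a remote
 * sender cannot predict which bucket a given public key hashes to, which
 * defends against deliberate flooding of a single bucket.
 */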

struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	get_random_bytes(&table->key, sizeof(table->key));
	hash_init(table->hashtable);
	mutex_init(&table->lock);
	return table;
}
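/* All mutations of this table (the add and remove below) are serialized by
 * table->lock, while lookups never take it and instead traverse buckets
 * under RCU; hence the hlist_*_rcu variants used throughout.
 */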
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_add_head_rcu(&peer->pubkey_hash,
			   pubkey_bucket(table, peer->handshake.remote_static));
	mutex_unlock(&table->lock);
}

void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}
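
/* Illustrative caller pattern, a sketch rather than code from this file,
 * assuming the wg_peer_put() helper from peer.h:
 *
 *	struct wg_peer *peer = wg_pubkey_hashtable_lookup(table, pubkey);
 *
 *	if (peer) {
 *		... use peer ...
 *		wg_peer_put(peer);
 *	}
 */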
72e7096c13SJason A. Donenfeld 
index_bucket(struct index_hashtable * table,const __le32 index)73e7096c13SJason A. Donenfeld static struct hlist_head *index_bucket(struct index_hashtable *table,
74e7096c13SJason A. Donenfeld 				       const __le32 index)
75e7096c13SJason A. Donenfeld {
76e7096c13SJason A. Donenfeld 	/* Since the indices are random and thus all bits are uniformly
77e7096c13SJason A. Donenfeld 	 * distributed, we can find its bucket simply by masking.
78e7096c13SJason A. Donenfeld 	 */
79e7096c13SJason A. Donenfeld 	return &table->hashtable[(__force u32)index &
80e7096c13SJason A. Donenfeld 				 (HASH_SIZE(table->hashtable) - 1)];
81e7096c13SJason A. Donenfeld }

struct index_hashtable *wg_index_hashtable_alloc(void)
{
	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	hash_init(table->hashtable);
	spin_lock_init(&table->lock);
	return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
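
/* Equivalently: a random pick collides with probability
 * q = (3 * 2^20) / 2^32 = 3/4096, so a single try succeeds with
 * p = 1 - q = 0.999267..., and the expected number of tries is 1/p,
 * about 1.00073. The search loop below therefore almost always exits
 * on its first pass.
 */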

__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First we try to find an unused slot, randomly, while unlocked. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* If it's already in use, we continue searching. */
			goto search_unused_slot;
	}

	/* Once we've found an unused slot, we lock it, and then double-check
	 * that nobody else stole it from us.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* If it was stolen, we start over. */
			goto search_unused_slot;
		}
	}
	/* Otherwise, we know we have it exclusively (since we're locked),
	 * so we insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}
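
/* The shape above is optimistic concurrency: candidate indices are probed
 * cheaply under rcu_read_lock_bh() alone, and table->lock is held only for
 * the short recheck-and-claim, so the spinlock sees little contention even
 * with many handshakes in flight.
 */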
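/* Atomically hands old's index over to new under the table lock, so that
 * concurrent lookups never observe a window with neither entry present.
 * Returns false if old was no longer hashed, in which case nothing is
 * replaced. In WireGuard this is used, e.g., when a completed handshake's
 * entry gives way to the resulting keypair's entry, though those call
 * sites live outside this file.
 */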
bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Calling init here NULLs out index_hash, and in fact after this
	 * function returns, it's theoretically possible for this to get
	 * reinserted elsewhere. That means the RCU lookup below might either
	 * terminate early or jump between buckets, in which case the packet
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}

void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}
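
/* Illustrative use of type_mask, a sketch rather than code from this file:
 * a receive path willing to accept either entry type might do
 *
 *	entry = wg_index_hashtable_lookup(table,
 *					  INDEX_HASHTABLE_HANDSHAKE |
 *					  INDEX_HASHTABLE_KEYPAIR,
 *					  index, &peer);
 *
 * assuming the index_hashtable_type constants declared in peerlookup.h,
 * and dropping the returned peer reference with wg_peer_put() when done.
 */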