/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

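/* Insert a freshly computed hash into the cache, evicting the least
 * recently used entry once the cache exceeds xenvif_hash_cache_size.
 * Duplicate tags are detected under the cache lock so that two
 * concurrent callers cannot insert the same tag twice; the loser
 * simply frees its candidate entry.  Evicted entries are released
 * with kfree_rcu() so that lockless readers in xenvif_find_hash()
 * never see freed memory.
 */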
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			kfree_rcu(oldest, rcu);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}

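/* Compute the Toeplitz hash of @data over the current key.  The
 * algorithm itself lives in xen_netif_toeplitz_hash(), pulled in via
 * XEN_NETIF_DEFINE_TOEPLITZ above; conceptually it works roughly
 * like this (an illustrative sketch only, not the shipped code):
 *
 *	hash = 0; window = first 32 bits of key;
 *	for each bit b of data, most significant first:
 *		if (b) hash ^= window;
 *		shift window left one bit, pulling in the next key bit;
 *
 * If the hash cache is enabled, the result is remembered so that
 * later packets of the same flow avoid the recomputation.
 */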
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}

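/* Drop every entry from the hash cache, e.g. after the key has
 * changed and all cached values are stale.  Freeing is deferred via
 * kfree_rcu() so concurrent lockless lookups remain safe.
 */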
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry, *n;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	/* Entries are unlinked and queued for freeing while walking
	 * the list, so use the _safe iterator rather than the RCU
	 * one to avoid a use-after-free during traversal.
	 */
	list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		kfree_rcu(entry, rcu);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

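/* Look up @data in the hash cache, falling back to computing (and
 * caching) a fresh Toeplitz hash on a miss.  Lookups run under
 * rcu_read_lock() only; writers serialize on the cache spinlock.
 * A hit refreshes the entry's sequence number, which is what the
 * eviction in xenvif_add_hash() uses as its LRU ordering.
 */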
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}

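/* Set the hash of an skb about to be passed to the frontend,
 * honouring the hash types the frontend enabled.  The Toeplitz input
 * buffer follows the usual RSS convention, in network byte order:
 *
 *	IPv4 + TCP: src addr | dst addr | src port | dst port  (12 octets)
 *	IPv4:       src addr | dst addr                        ( 8 octets)
 *	IPv6 + TCP: src addr | dst addr | src port | dst port  (36 octets)
 *	IPv6:       src addr | dst addr                        (32 octets)
 *
 * A fragmented TCP packet has no usable ports, so it degrades to the
 * corresponding L3 hash type if that one is enabled.
 */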
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

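/* Control ring handler: select the hashing algorithm.  Only NONE and
 * TOEPLITZ are recognised; anything else is rejected before
 * vif->hash.alg is touched.
 */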
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

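/* Control ring handler: report which hash types the backend can
 * compute.  All four IPv4/IPv6 (+TCP) types are supported, but only
 * once an algorithm other than NONE has been selected.
 */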
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

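/* Control ring handler: enable a subset of the supported hash types.
 * Unknown flag bits, or an attempt to set flags while the algorithm
 * is still NONE, are rejected.
 */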
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

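/* Control ring handler: copy a new hash key from the frontend.  The
 * key arrives by grant reference rather than inline in the message,
 * so a single grant copy pulls @len octets from @gref into
 * vif->hash.key.  Any tail beyond @len is zeroed, and the hash cache
 * is flushed since values computed with the old key are now stale.
 */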
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

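/* Control ring handler: size the hash-to-queue mapping table.  The
 * active mapping is zeroed up to the new size so that any lookup
 * performed before the frontend loads a new table maps to queue 0.
 */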
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
	       sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

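/* Control ring handler: load (part of) the queue mapping table from
 * the frontend.  The table is double buffered: the grant copy lands
 * in the inactive buffer, which only becomes live (the mapping_sel
 * flip at the end) once every copied entry has been validated
 * against num_queues, so readers never observe a partially written
 * or invalid mapping.
 *
 * A copy of @len entries at offset @off may straddle a Xen page
 * boundary in the destination; grant copies cannot cross a page, so
 * the operation is split in two.  A worked example, assuming 4 KiB
 * pages and 4-byte entries: dest.offset = 4092 with len = 3 (12
 * bytes) yields copy_op[0].len = 4 (the rest of the first page) and
 * copy_op[1].len = 8, starting at offset 0 of the following page.
 */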
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
	unsigned int nr = 1;
	struct gnttab_copy copy_op[2] = {{
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.domid = DOMID_SELF,
		.len = len * sizeof(*mapping),
		.flags = GNTCOPY_source_gref
	}};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    len > XEN_PAGE_SIZE / sizeof(*mapping))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
	copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
	if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
		copy_op[1] = copy_op[0];
		copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
		copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
		copy_op[1].dest.offset = 0;
		copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
		copy_op[0].len = copy_op[1].source.offset;
		nr = 2;
	}

	memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
	       vif->hash.size * sizeof(*mapping));

	if (copy_op[0].len != 0) {
		gnttab_batch_copy(copy_op, nr);

		if (copy_op[0].status != GNTST_okay ||
		    copy_op[nr - 1].status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	while (len-- != 0)
		if (mapping[off++] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.mapping_sel = !vif->hash.mapping_sel;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

#ifdef CONFIG_DEBUG_FS
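/* debugfs: pretty-print the negotiated algorithm, the enabled hash
 * types, the key and the queue mapping.  Nothing beyond the
 * algorithm line is printed while the algorithm is NONE.
 */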
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		/* FALLTHRU */
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */

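/* Set up the per-vif hash cache.  There is nothing to do when the
 * cache is disabled by setting xenvif_hash_cache_size to zero.
 */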
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	BUG_ON(vif->hash.cache.count);

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

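/* Tear down the hash state: just drop any cached entries; the list
 * head and lock need no further cleanup.
 */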
void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}
467