/* net/openvswitch/flow_table.c (openbmc/linux, revision c819e2cf) */
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
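
/* Usage sketch (added commentary, illustrative only): this is how
 * masked_flow_lookup() below combines masking and hashing, and how a
 * caller could derive the masked view of a key:
 *
 *	struct sw_flow_key masked;
 *	u32 hash;
 *
 *	ovs_flow_mask_key(&masked, &unmasked_key, mask);
 *	hash = flow_hash(&masked, mask->range.start, mask->range.end);
 *
 * Only the bytes inside 'mask->range' of 'masked' are initialized, which
 * is sufficient because flow_hash() and cmp_key() stay within that range.
 */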

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
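
/* Note (added commentary): flex_array spreads the bucket array over
 * individually allocated pages, so large tables avoid the high-order
 * contiguous allocations a plain kmalloc() would require.  A bucket is
 * reached by index, e.g.:
 *
 *	struct hlist_head *head = flex_array_get(ti->buckets, i);
 *
 * which is how find_bucket() and the iteration helpers below access it.
 */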

static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
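
/* Usage note (added commentary): 'deferred' must be true whenever RCU
 * readers may still hold references to the flow, e.g. when removing a
 * flow from a live table:
 *
 *	ovs_flow_tbl_remove(table, flow);
 *	ovs_flow_free(flow, true);	/- freed after a grace period -/
 *
 * Immediate freeing (deferred == false) is only safe on paths where no
 * reader can see the flow, such as table destruction or error unwinding.
 */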

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);

	table_instance_destroy(ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
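
/* Usage sketch (added commentary, illustrative only): (*bucket, *last)
 * form a resumable cursor, which is what lets netlink flow dumps span
 * multiple calls.  Walking every flow in a table instance looks like:
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		;	/- process 'flow'; may stop and resume later -/
 *
 * Because the cursor is positional, concurrent inserts or removals can
 * cause flows to be skipped or revisited; the dump is best-effort.
 */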

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}
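
/* Note (added commentary): n_buckets is always a power of two
 * (TBL_MIN_BUCKETS is 1024 and expansion doubles it), so the mask above
 * is a cheap modulo, e.g. with n_buckets == 1024:
 *
 *	hash & (1024 - 1)  ==  hash % 1024
 *
 * Folding in the per-instance random 'hash_seed' also makes bucket
 * placement differ between table instances.
 */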

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
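
/* Note (added commentary): each sw_flow carries two hash-list nodes,
 * hash_node[0] and hash_node[1].  Because the new instance flips
 * node_ver, a flow can be linked into the old and the new table at the
 * same time without any per-flow allocation, and RCU readers of the old
 * instance are undisturbed.  Setting old->keep_flows tells
 * table_instance_destroy() not to free the flows that now live on in
 * the new instance.
 */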

static struct table_instance *table_instance_rehash(struct table_instance *ti,
					    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
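
/* Note (added commentary): the comparison accumulates XOR differences
 * word by word instead of returning early; e.g. over two longs it
 * computes:
 *
 *	diffs = (a0 ^ b0) | (a1 ^ b1);	/- nonzero iff any word differs -/
 *
 * A single branch at the end is typically cheaper than a branch per word
 * for the short, long-aligned spans compared here (sw_flow_key is padded
 * to sizeof(long); see the BUILD_BUG_ONs in ovs_flow_init()).
 */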

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					  key_start, key_end))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
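
/* Note (added commentary): lookup cost scales with the number of masks
 * tried before a hit, and '*n_mask_hit' reports exactly that.  For
 * example, if the third mask on the list produces the match,
 * *n_mask_hit == 3, letting the caller account an average
 * masks-searched-per-packet statistic for the datapath.
 */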

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}
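
/* Note (added commentary): masks are reference-counted and shared, so a
 * thousand flows with the same wildcard pattern contribute a single
 * mask_list entry.  flow_mask_insert() below takes the reference (a
 * ref_count increment or a fresh allocation); this function drops it
 * when a flow goes away, freeing the mask via RCU once the count
 * reaches zero.
 */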

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it must stay
	 * accessible to concurrent readers for as long as the RCU read
	 * lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}
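
/* Note (added commentary): the resize policy keeps the average chain
 * length at or below one entry: the table doubles once count exceeds
 * n_buckets (1024 -> 2048 -> 4096 ...).  Independently, every
 * REHASH_INTERVAL (10 minutes) the table is rebuilt at the same size,
 * which picks a fresh random hash_seed and so limits how long any
 * discovered bucket collisions remain exploitable.
 */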

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}