xref: /openbmc/linux/net/openvswitch/flow_table.c (revision a2cce7a9)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

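/* Number of bytes covered by 'range' within a flow key. */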
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * the memory outside of 'mask->range' is left uninitialized. This can
	 * be used as an optimization when further operations on 'dst' only
	 * use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

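/* Allocate a flow along with its default statistics node (node 0).
 * Statistics pointers for the remaining NUMA nodes start out NULL; the
 * stats code populates them lazily on first use from another node.
 */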
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

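/* Allocate a flex_array of 'n_buckets' hash list heads and initialize
 * each one to an empty list.
 */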
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

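/* Free a flow and everything it owns: the unmasked key (for key-based
 * identifiers), the actions, and every per-node statistics block.
 */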
static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

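/* Free 'flow' immediately, or after an RCU grace period if 'deferred' is
 * true (i.e. while readers may still be traversing the table).
 */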
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

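/* Allocate a table instance with 'new_size' buckets and a fresh random
 * hash seed.
 */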
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

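/* Destroy 'ti' and 'ufid_ti', freeing every flow still linked into the
 * buckets unless 'keep_flows' indicates the flows were handed off to a
 * newer instance (e.g. after a rehash).
 */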
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called only from the RCU callback
 * or from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

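/* Return the next flow after the cursor ('bucket', 'last'), advancing the
 * cursor, or NULL once all buckets are exhausted. A typical caller loop
 * (a sketch, not a verbatim caller) looks like:
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		...emit 'flow'...
 */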
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

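/* Map 'hash' to a bucket, mixing in the per-instance seed so the bucket
 * distribution changes across rehashes. 'n_buckets' is always a power of
 * two, so masking with (n_buckets - 1) selects a valid bucket.
 */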
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

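/* Re-link every flow from 'old' into 'new'. Each flow carries two hlist
 * node slots (indexed by node_ver), so it can sit in the old and new
 * instances simultaneously and readers never lose sight of it.
 */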
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

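/* Hash the bytes of 'key' that fall within 'range' as a sequence of u32s. */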
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

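/* Return the offset at which meaningful key bytes begin: 0 if the flow has
 * a tunnel key, otherwise the start of the 'phy' field rounded down to a
 * long boundary.
 */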
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.u.ipv4.dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

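/* Compare [key_start, key_end) of two keys one long at a time, accumulating
 * the XOR of the differences so the loop body stays branch-free.
 */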
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

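/* Look up the flow matching 'unmasked' under a single 'mask': mask the key,
 * hash the masked bytes, then scan the bucket for an exact masked match.
 */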
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

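/* Try each mask in turn until a flow matches. '*n_mask_hit' counts how many
 * masks were tried, which callers can use as a measure of lookup cost.
 */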
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

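/* Return the entry on 'tbl''s mask list equal to 'mask', or NULL if there
 * is no such entry.
 */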
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_node_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}