xref: /openbmc/linux/net/openvswitch/flow_table.c (revision e0bf6c5c)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

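/* Every table instance starts with TBL_MIN_BUCKETS hash buckets; an
 * instance is replaced by a larger one once the flow count exceeds the
 * bucket count. Independently of growth, the key table is also rehashed
 * with a fresh random seed every REHASH_INTERVAL, so that the bucket
 * distribution never stays pinned to a single (possibly degenerate) seed.
 */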
#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

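/* flow_cache backs struct sw_flow allocations (sized at init time to
 * include the trailing array of per-NUMA-node stats pointers);
 * flow_stats_cache backs the individual struct flow_stats instances and
 * is shared with flow.c.
 */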
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

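/* Apply 'mask' to the flow key 'src' and write the result to 'dst'.
 * Only the bytes inside 'mask->range' are produced (e.g. a mask byte of
 * 0xff keeps the corresponding source byte, 0x00 clears it); the copy
 * runs one long at a time, so the range boundaries must be long-aligned.
 */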
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}

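/* Allocate a flow from the slab caches: the flow itself plus a zeroed
 * stats node for NUMA node 0. Stats for the other nodes start out NULL
 * and are allocated lazily on first use. Returns ERR_PTR(-ENOMEM) on
 * failure.
 */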
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

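/* Allocate a flex_array of 'n_buckets' hash chain heads and initialize
 * each of them to an empty hlist. Returns NULL on allocation failure.
 */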
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

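/* Free 'flow' together with its unmasked key (for key-based identifiers),
 * its actions and its per-node statistics. Callers must guarantee that no
 * RCU readers can still reach the flow, hence the __force casts below.
 */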
static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

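/* Free 'flow', deferring the actual release until after an RCU grace
 * period when 'deferred' is true (i.e. when concurrent readers may still
 * hold a reference).
 */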
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

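/* Allocate a table instance with 'new_size' buckets, a fresh random hash
 * seed and version 0 of the embedded hlist nodes selected. Returns NULL
 * on allocation failure.
 */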
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

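/* Initialize 'table': allocate the initial key and UFID table instances
 * and reset the mask list and flow counters. Returns 0 or -ENOMEM.
 */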
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

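/* Destroy both table instances and, unless 'ti->keep_flows' indicates
 * that the flows were handed over to a newer instance during a rehash,
 * unlink and free every flow as well. 'deferred' defers the freeing
 * through RCU.
 */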
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

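/* Return the next flow after the cursor formed by '*bucket' (bucket
 * index) and '*last' (1-based position within that bucket), advancing
 * the cursor, or NULL once the table is exhausted. The netlink flow dump
 * path feeds the same pair back in when a dump is resumed.
 */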
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

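/* Map a precomputed flow hash to its bucket: mix in the per-instance
 * random seed, then mask to the (power-of-two) bucket count. With
 * n_buckets == 1024, for instance, the low 10 bits of the seeded hash
 * select the bucket.
 */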
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

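/* Relink every flow from 'old' into 'new'. Each flow embeds two hlist
 * nodes per table, and 'new' uses the opposite node_ver, so flows remain
 * reachable in 'old' for concurrent RCU readers while they are inserted
 * into 'new'. Setting old->keep_flows prevents the flows from being
 * freed when 'old' is destroyed.
 */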
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

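/* Hash the u32 words of 'key' that fall inside 'range'. The result is
 * combined with the per-instance seed in find_bucket() rather than here,
 * so the stored hash stays valid across table instances.
 */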
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

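/* Offset at which key comparison starts: 0 when the flow has a tunnel
 * key (so that 'tun_key' is included), otherwise just past the tunnel
 * key, at the 'phy' member rounded down to long alignment.
 */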
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

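/* Compare two keys over [key_start, key_end) one long at a time,
 * accumulating the XOR of each pair of words; the keys are equal iff no
 * bit ever differed.
 */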
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

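/* Look up the flow matching 'unmasked' under a single 'mask': mask the
 * key, hash the masked result over the mask's range, and scan the bucket
 * for a flow with the same mask, hash and masked key.
 */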
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

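/* Look up 'key' by trying each mask in the mask list in turn until one
 * yields a match. '*n_mask_hit' returns how many masks were tried, which
 * the caller uses to account megaflow mask lookups.
 */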
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* Release the mask via RCU. 'flow->mask' is not set to NULL, as it
	 * must remain accessible for as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}