xref: /openbmc/linux/net/openvswitch/flow_table.c (revision 22b6e7f3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2007-2014 Nicira, Inc.
4  */
5 
6 #include "flow.h"
7 #include "datapath.h"
8 #include "flow_netlink.h"
9 #include <linux/uaccess.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_vlan.h>
14 #include <net/llc_pdu.h>
15 #include <linux/kernel.h>
16 #include <linux/jhash.h>
17 #include <linux/jiffies.h>
18 #include <linux/llc.h>
19 #include <linux/module.h>
20 #include <linux/in.h>
21 #include <linux/rcupdate.h>
22 #include <linux/cpumask.h>
23 #include <linux/if_arp.h>
24 #include <linux/ip.h>
25 #include <linux/ipv6.h>
26 #include <linux/sctp.h>
27 #include <linux/tcp.h>
28 #include <linux/udp.h>
29 #include <linux/icmp.h>
30 #include <linux/icmpv6.h>
31 #include <linux/rculist.h>
32 #include <linux/sort.h>
33 #include <net/ip.h>
34 #include <net/ipv6.h>
35 #include <net/ndisc.h>
36 
37 #define TBL_MIN_BUCKETS		1024
38 #define MASK_ARRAY_SIZE_MIN	16
39 #define REHASH_INTERVAL		(10 * 60 * HZ)
40 
41 #define MC_DEFAULT_HASH_ENTRIES	256
42 #define MC_HASH_SHIFT		8
43 #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
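/* A 32-bit skb_hash is consumed MC_HASH_SHIFT bits at a time, so with the
 * values above ovs_flow_tbl_lookup_stats() probes up to MC_HASH_SEGS
 * (32 / 8 = 4) per-CPU cache slots when looking for a cached mask index.
 */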
44 
45 static struct kmem_cache *flow_cache;
46 struct kmem_cache *flow_stats_cache __read_mostly;
47 
48 static u16 range_n_bytes(const struct sw_flow_key_range *range)
49 {
50 	return range->end - range->start;
51 }
52 
53 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
54 		       bool full, const struct sw_flow_mask *mask)
55 {
56 	int start = full ? 0 : mask->range.start;
57 	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
58 	const long *m = (const long *)((const u8 *)&mask->key + start);
59 	const long *s = (const long *)((const u8 *)src + start);
60 	long *d = (long *)((u8 *)dst + start);
61 	int i;
62 
63 	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
64 	 * the memory outside of 'mask->range' is left uninitialized. This can
65 	 * be used as an optimization when further operations on 'dst' only use
66 	 * contents within 'mask->range'.
67 	 */
68 	for (i = 0; i < len; i += sizeof(long))
69 		*d++ = *s++ & *m++;
70 }
71 
72 struct sw_flow *ovs_flow_alloc(void)
73 {
74 	struct sw_flow *flow;
75 	struct sw_flow_stats *stats;
76 
77 	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
78 	if (!flow)
79 		return ERR_PTR(-ENOMEM);
80 
81 	flow->stats_last_writer = -1;
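	/* 'stats' is a flexible array with one pointer per possible CPU; the
	 * cpumask tracking which CPUs hold a stats node is carved out of the
	 * same allocation, right after the last pointer (see the flow_cache
	 * size in ovs_flow_init()).
	 */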
82 	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
83 
84 	/* Initialize the default stat node. */
85 	stats = kmem_cache_alloc_node(flow_stats_cache,
86 				      GFP_KERNEL | __GFP_ZERO,
87 				      node_online(0) ? 0 : NUMA_NO_NODE);
88 	if (!stats)
89 		goto err;
90 
91 	spin_lock_init(&stats->lock);
92 
93 	RCU_INIT_POINTER(flow->stats[0], stats);
94 
95 	cpumask_set_cpu(0, flow->cpu_used_mask);
96 
97 	return flow;
98 err:
99 	kmem_cache_free(flow_cache, flow);
100 	return ERR_PTR(-ENOMEM);
101 }
102 
103 int ovs_flow_tbl_count(const struct flow_table *table)
104 {
105 	return table->count;
106 }
107 
108 static void flow_free(struct sw_flow *flow)
109 {
110 	int cpu;
111 
112 	if (ovs_identifier_is_key(&flow->id))
113 		kfree(flow->id.unmasked_key);
114 	if (flow->sf_acts)
115 		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
116 					  flow->sf_acts);
117 	/* We open code this to make sure cpu 0 is always considered */
118 	for (cpu = 0; cpu < nr_cpu_ids;
119 	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
120 		if (flow->stats[cpu])
121 			kmem_cache_free(flow_stats_cache,
122 					(struct sw_flow_stats __force *)flow->stats[cpu]);
123 	}
124 
125 	kmem_cache_free(flow_cache, flow);
126 }
127 
128 static void rcu_free_flow_callback(struct rcu_head *rcu)
129 {
130 	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
131 
132 	flow_free(flow);
133 }
134 
135 void ovs_flow_free(struct sw_flow *flow, bool deferred)
136 {
137 	if (!flow)
138 		return;
139 
140 	if (deferred)
141 		call_rcu(&flow->rcu, rcu_free_flow_callback);
142 	else
143 		flow_free(flow);
144 }
145 
146 static void __table_instance_destroy(struct table_instance *ti)
147 {
148 	kvfree(ti->buckets);
149 	kfree(ti);
150 }
151 
152 static struct table_instance *table_instance_alloc(int new_size)
153 {
154 	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
155 	int i;
156 
157 	if (!ti)
158 		return NULL;
159 
160 	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
161 				     GFP_KERNEL);
162 	if (!ti->buckets) {
163 		kfree(ti);
164 		return NULL;
165 	}
166 
167 	for (i = 0; i < new_size; i++)
168 		INIT_HLIST_HEAD(&ti->buckets[i]);
169 
170 	ti->n_buckets = new_size;
171 	ti->node_ver = 0;
172 	get_random_bytes(&ti->hash_seed, sizeof(u32));
173 
174 	return ti;
175 }
176 
177 static void __mask_array_destroy(struct mask_array *ma)
178 {
179 	free_percpu(ma->masks_usage_stats);
180 	kfree(ma);
181 }
182 
183 static void mask_array_rcu_cb(struct rcu_head *rcu)
184 {
185 	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
186 
187 	__mask_array_destroy(ma);
188 }
189 
190 static void tbl_mask_array_reset_counters(struct mask_array *ma)
191 {
192 	int i, cpu;
193 
194 	/* As the per-CPU counters are not atomic, we cannot reset them
195 	 * from another CPU. To still provide an approximately zero-based
196 	 * counter, we store the value at reset time and subtract it later
197 	 * when processing.
198 	 */
199 	for (i = 0; i < ma->max; i++) {
200 		ma->masks_usage_zero_cntr[i] = 0;
201 
202 		for_each_possible_cpu(cpu) {
203 			struct mask_array_stats *stats;
204 			unsigned int start;
205 			u64 counter;
206 
207 			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
208 			do {
209 				start = u64_stats_fetch_begin(&stats->syncp);
210 				counter = stats->usage_cntrs[i];
211 			} while (u64_stats_fetch_retry(&stats->syncp, start));
212 
213 			ma->masks_usage_zero_cntr[i] += counter;
214 		}
215 	}
216 }
217 
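/* A mask_array is allocated as a single blob: the struct (whose 'masks'
 * member is a flexible array of 'size' pointers) followed by 'size' u64
 * "zero point" counters.  The usage counters themselves live in a separate
 * per-CPU allocation (struct mask_array_stats plus one u64 per mask).
 */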
218 static struct mask_array *tbl_mask_array_alloc(int size)
219 {
220 	struct mask_array *new;
221 
222 	size = max(MASK_ARRAY_SIZE_MIN, size);
223 	new = kzalloc(sizeof(struct mask_array) +
224 		      sizeof(struct sw_flow_mask *) * size +
225 		      sizeof(u64) * size, GFP_KERNEL);
226 	if (!new)
227 		return NULL;
228 
229 	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
230 					     sizeof(struct mask_array) +
231 					     sizeof(struct sw_flow_mask *) *
232 					     size);
233 
234 	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
235 						sizeof(u64) * size,
236 						__alignof__(u64));
237 	if (!new->masks_usage_stats) {
238 		kfree(new);
239 		return NULL;
240 	}
241 
242 	new->count = 0;
243 	new->max = size;
244 
245 	return new;
246 }
247 
248 static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
249 {
250 	struct mask_array *old;
251 	struct mask_array *new;
252 
253 	new = tbl_mask_array_alloc(size);
254 	if (!new)
255 		return -ENOMEM;
256 
257 	old = ovsl_dereference(tbl->mask_array);
258 	if (old) {
259 		int i;
260 
261 		for (i = 0; i < old->max; i++) {
262 			if (ovsl_dereference(old->masks[i]))
263 				new->masks[new->count++] = old->masks[i];
264 		}
265 		call_rcu(&old->rcu, mask_array_rcu_cb);
266 	}
267 
268 	rcu_assign_pointer(tbl->mask_array, new);
269 
270 	return 0;
271 }
272 
273 static int tbl_mask_array_add_mask(struct flow_table *tbl,
274 				   struct sw_flow_mask *new)
275 {
276 	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
277 	int err, ma_count = READ_ONCE(ma->count);
278 
279 	if (ma_count >= ma->max) {
280 		err = tbl_mask_array_realloc(tbl, ma->max +
281 						  MASK_ARRAY_SIZE_MIN);
282 		if (err)
283 			return err;
284 
285 		ma = ovsl_dereference(tbl->mask_array);
286 	} else {
287 		/* On every add or delete we need to reset the counters so
288 		 * every new mask gets a fair chance of being prioritized.
289 		 */
290 		tbl_mask_array_reset_counters(ma);
291 	}
292 
293 	BUG_ON(ovsl_dereference(ma->masks[ma_count]));
294 
295 	rcu_assign_pointer(ma->masks[ma_count], new);
296 	WRITE_ONCE(ma->count, ma_count + 1);
297 
298 	return 0;
299 }
300 
301 static void tbl_mask_array_del_mask(struct flow_table *tbl,
302 				    struct sw_flow_mask *mask)
303 {
304 	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
305 	int i, ma_count = READ_ONCE(ma->count);
306 
307 	/* Remove the deleted mask pointers from the array */
308 	for (i = 0; i < ma_count; i++) {
309 		if (mask == ovsl_dereference(ma->masks[i]))
310 			goto found;
311 	}
312 
313 	BUG();
314 	return;
315 
316 found:
317 	WRITE_ONCE(ma->count, ma_count - 1);
318 
319 	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
320 	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
321 
322 	kfree_rcu(mask, rcu);
323 
324 	/* Shrink the mask array if necessary. */
325 	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
326 	    ma_count <= (ma->max / 3))
327 		tbl_mask_array_realloc(tbl, ma->max / 2);
328 	else
329 		tbl_mask_array_reset_counters(ma);
330 
331 }
332 
333 /* Remove 'mask' from the mask list, if it is not needed any more. */
334 static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
335 {
336 	if (mask) {
337 		/* The ovs lock is required to protect the mask refcount and
338 		 * the mask list.
339 		 */
340 		ASSERT_OVSL();
341 		BUG_ON(!mask->ref_count);
342 		mask->ref_count--;
343 
344 		if (!mask->ref_count)
345 			tbl_mask_array_del_mask(tbl, mask);
346 	}
347 }
348 
349 static void __mask_cache_destroy(struct mask_cache *mc)
350 {
351 	free_percpu(mc->mask_cache);
352 	kfree(mc);
353 }
354 
355 static void mask_cache_rcu_cb(struct rcu_head *rcu)
356 {
357 	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
358 
359 	__mask_cache_destroy(mc);
360 }
361 
362 static struct mask_cache *tbl_mask_cache_alloc(u32 size)
363 {
364 	struct mask_cache_entry __percpu *cache = NULL;
365 	struct mask_cache *new;
366 
367 	/* Only allow the size to be 0 or a power of 2, and it must not
368 	 * exceed the percpu allocation size.
369 	 */
370 	if ((!is_power_of_2(size) && size != 0) ||
371 	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
372 		return NULL;
373 
374 	new = kzalloc(sizeof(*new), GFP_KERNEL);
375 	if (!new)
376 		return NULL;
377 
378 	new->cache_size = size;
379 	if (new->cache_size > 0) {
380 		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
381 						  new->cache_size),
382 				       __alignof__(struct mask_cache_entry));
383 		if (!cache) {
384 			kfree(new);
385 			return NULL;
386 		}
387 	}
388 
389 	new->mask_cache = cache;
390 	return new;
391 }

392 int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
393 {
394 	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
395 	struct mask_cache *new;
396 
397 	if (size == mc->cache_size)
398 		return 0;
399 
400 	if ((!is_power_of_2(size) && size != 0) ||
401 	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
402 		return -EINVAL;
403 
404 	new = tbl_mask_cache_alloc(size);
405 	if (!new)
406 		return -ENOMEM;
407 
408 	rcu_assign_pointer(table->mask_cache, new);
409 	call_rcu(&mc->rcu, mask_cache_rcu_cb);
410 
411 	return 0;
412 }
413 
414 int ovs_flow_tbl_init(struct flow_table *table)
415 {
416 	struct table_instance *ti, *ufid_ti;
417 	struct mask_cache *mc;
418 	struct mask_array *ma;
419 
420 	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
421 	if (!mc)
422 		return -ENOMEM;
423 
424 	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
425 	if (!ma)
426 		goto free_mask_cache;
427 
428 	ti = table_instance_alloc(TBL_MIN_BUCKETS);
429 	if (!ti)
430 		goto free_mask_array;
431 
432 	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
433 	if (!ufid_ti)
434 		goto free_ti;
435 
436 	rcu_assign_pointer(table->ti, ti);
437 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
438 	rcu_assign_pointer(table->mask_array, ma);
439 	rcu_assign_pointer(table->mask_cache, mc);
440 	table->last_rehash = jiffies;
441 	table->count = 0;
442 	table->ufid_count = 0;
443 	return 0;
444 
445 free_ti:
446 	__table_instance_destroy(ti);
447 free_mask_array:
448 	__mask_array_destroy(ma);
449 free_mask_cache:
450 	__mask_cache_destroy(mc);
451 	return -ENOMEM;
452 }
453 
454 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
455 {
456 	struct table_instance *ti;
457 
458 	ti = container_of(rcu, struct table_instance, rcu);
459 	__table_instance_destroy(ti);
460 }
461 
462 static void table_instance_flow_free(struct flow_table *table,
463 				     struct table_instance *ti,
464 				     struct table_instance *ufid_ti,
465 				     struct sw_flow *flow)
466 {
467 	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
468 	table->count--;
469 
470 	if (ovs_identifier_is_ufid(&flow->id)) {
471 		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
472 		table->ufid_count--;
473 	}
474 
475 	flow_mask_remove(table, flow->mask);
476 }
477 
478 /* Must be called with OVS mutex held. */
479 void table_instance_flow_flush(struct flow_table *table,
480 			       struct table_instance *ti,
481 			       struct table_instance *ufid_ti)
482 {
483 	int i;
484 
485 	for (i = 0; i < ti->n_buckets; i++) {
486 		struct hlist_head *head = &ti->buckets[i];
487 		struct hlist_node *n;
488 		struct sw_flow *flow;
489 
490 		hlist_for_each_entry_safe(flow, n, head,
491 					  flow_table.node[ti->node_ver]) {
492 
493 			table_instance_flow_free(table, ti, ufid_ti,
494 						 flow);
495 			ovs_flow_free(flow, true);
496 		}
497 	}
498 
499 	if (WARN_ON(table->count != 0 ||
500 		    table->ufid_count != 0)) {
501 		table->count = 0;
502 		table->ufid_count = 0;
503 	}
504 }
505 
506 static void table_instance_destroy(struct table_instance *ti,
507 				   struct table_instance *ufid_ti)
508 {
509 	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
510 	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
511 }
512 
513 /* No need for locking; this function is called from an RCU callback or
514  * the error path.
515  */
516 void ovs_flow_tbl_destroy(struct flow_table *table)
517 {
518 	struct table_instance *ti = rcu_dereference_raw(table->ti);
519 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
520 	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
521 	struct mask_array *ma = rcu_dereference_raw(table->mask_array);
522 
523 	call_rcu(&mc->rcu, mask_cache_rcu_cb);
524 	call_rcu(&ma->rcu, mask_array_rcu_cb);
525 	table_instance_destroy(ti, ufid_ti);
526 }
527 
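/* Iterate over the table instance one flow per call.  '*bucket' and '*last'
 * form a resumable cursor: the current bucket index and the number of flows
 * already returned from that bucket.
 */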
528 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
529 				       u32 *bucket, u32 *last)
530 {
531 	struct sw_flow *flow;
532 	struct hlist_head *head;
533 	int ver;
534 	int i;
535 
536 	ver = ti->node_ver;
537 	while (*bucket < ti->n_buckets) {
538 		i = 0;
539 		head = &ti->buckets[*bucket];
540 		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
541 			if (i < *last) {
542 				i++;
543 				continue;
544 			}
545 			*last = i + 1;
546 			return flow;
547 		}
548 		(*bucket)++;
549 		*last = 0;
550 	}
551 
552 	return NULL;
553 }
554 
555 static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
556 {
557 	hash = jhash_1word(hash, ti->hash_seed);
558 	return &ti->buckets[hash & (ti->n_buckets - 1)];
559 }
560 
561 static void table_instance_insert(struct table_instance *ti,
562 				  struct sw_flow *flow)
563 {
564 	struct hlist_head *head;
565 
566 	head = find_bucket(ti, flow->flow_table.hash);
567 	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
568 }
569 
570 static void ufid_table_instance_insert(struct table_instance *ti,
571 				       struct sw_flow *flow)
572 {
573 	struct hlist_head *head;
574 
575 	head = find_bucket(ti, flow->ufid_table.hash);
576 	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
577 }
578 
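/* Each flow embeds two hash-list nodes per table (node[0]/node[1]).  The new
 * instance uses the opposite node_ver, so flows can be linked into it while
 * RCU readers still walk the old instance.
 */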
579 static void flow_table_copy_flows(struct table_instance *old,
580 				  struct table_instance *new, bool ufid)
581 {
582 	int old_ver;
583 	int i;
584 
585 	old_ver = old->node_ver;
586 	new->node_ver = !old_ver;
587 
588 	/* Insert in new table. */
589 	for (i = 0; i < old->n_buckets; i++) {
590 		struct sw_flow *flow;
591 		struct hlist_head *head = &old->buckets[i];
592 
593 		if (ufid)
594 			hlist_for_each_entry_rcu(flow, head,
595 						 ufid_table.node[old_ver],
596 						 lockdep_ovsl_is_held())
597 				ufid_table_instance_insert(new, flow);
598 		else
599 			hlist_for_each_entry_rcu(flow, head,
600 						 flow_table.node[old_ver],
601 						 lockdep_ovsl_is_held())
602 				table_instance_insert(new, flow);
603 	}
604 }
605 
606 static struct table_instance *table_instance_rehash(struct table_instance *ti,
607 						    int n_buckets, bool ufid)
608 {
609 	struct table_instance *new_ti;
610 
611 	new_ti = table_instance_alloc(n_buckets);
612 	if (!new_ti)
613 		return NULL;
614 
615 	flow_table_copy_flows(ti, new_ti, ufid);
616 
617 	return new_ti;
618 }
619 
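/* Replace both table instances with fresh minimum-sized ones and free every
 * flow from the old ones (deferred via RCU).  Must be called with the OVS
 * mutex held.
 */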
620 int ovs_flow_tbl_flush(struct flow_table *flow_table)
621 {
622 	struct table_instance *old_ti, *new_ti;
623 	struct table_instance *old_ufid_ti, *new_ufid_ti;
624 
625 	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
626 	if (!new_ti)
627 		return -ENOMEM;
628 	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
629 	if (!new_ufid_ti)
630 		goto err_free_ti;
631 
632 	old_ti = ovsl_dereference(flow_table->ti);
633 	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
634 
635 	rcu_assign_pointer(flow_table->ti, new_ti);
636 	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
637 	flow_table->last_rehash = jiffies;
638 
639 	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
640 	table_instance_destroy(old_ti, old_ufid_ti);
641 	return 0;
642 
643 err_free_ti:
644 	__table_instance_destroy(new_ti);
645 	return -ENOMEM;
646 }
647 
648 static u32 flow_hash(const struct sw_flow_key *key,
649 		     const struct sw_flow_key_range *range)
650 {
651 	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
652 
653 	/* Make sure the number of hash bytes is a multiple of u32. */
654 	int hash_u32s = range_n_bytes(range) >> 2;
655 
656 	return jhash2(hash_key, hash_u32s, 0);
657 }
658 
659 static int flow_key_start(const struct sw_flow_key *key)
660 {
661 	if (key->tun_proto)
662 		return 0;
663 	else
664 		return rounddown(offsetof(struct sw_flow_key, phy),
665 				 sizeof(long));
666 }
667 
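/* Compare two keys long-word by long-word over [key_start, key_end).
 * ovs_flow_init() checks that struct sw_flow_key's size and alignment are
 * long-compatible, so no byte-wise tail handling is needed.
 */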
668 static bool cmp_key(const struct sw_flow_key *key1,
669 		    const struct sw_flow_key *key2,
670 		    int key_start, int key_end)
671 {
672 	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
673 	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
674 	int i;
675 
676 	for (i = key_start; i < key_end; i += sizeof(long))
677 		if (*cp1++ ^ *cp2++)
678 			return false;
679 
680 	return true;
681 }
682 
683 static bool flow_cmp_masked_key(const struct sw_flow *flow,
684 				const struct sw_flow_key *key,
685 				const struct sw_flow_key_range *range)
686 {
687 	return cmp_key(&flow->key, key, range->start, range->end);
688 }
689 
690 static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
691 				      const struct sw_flow_match *match)
692 {
693 	struct sw_flow_key *key = match->key;
694 	int key_start = flow_key_start(key);
695 	int key_end = match->range.end;
696 
697 	BUG_ON(ovs_identifier_is_ufid(&flow->id));
698 	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
699 }
700 
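/* Look up 'unmasked' in one table instance using a single mask: apply the
 * mask, hash only the bytes covered by the mask's range, and walk the
 * matching bucket.  '*n_mask_hit' is bumped once per mask tried.
 */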
701 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
702 					  const struct sw_flow_key *unmasked,
703 					  const struct sw_flow_mask *mask,
704 					  u32 *n_mask_hit)
705 {
706 	struct sw_flow *flow;
707 	struct hlist_head *head;
708 	u32 hash;
709 	struct sw_flow_key masked_key;
710 
711 	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
712 	hash = flow_hash(&masked_key, &mask->range);
713 	head = find_bucket(ti, hash);
714 	(*n_mask_hit)++;
715 
716 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
717 				 lockdep_ovsl_is_held()) {
718 		if (flow->mask == mask && flow->flow_table.hash == hash &&
719 		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
720 			return flow;
721 	}
722 	return NULL;
723 }
724 
725 /* Flow lookup does a full lookup on the flow table. It starts with the
726  * mask at the index passed in via '*index'.
727  * This function MUST be called with BH disabled due to the use
728  * of per-CPU variables.
729  */
730 static struct sw_flow *flow_lookup(struct flow_table *tbl,
731 				   struct table_instance *ti,
732 				   struct mask_array *ma,
733 				   const struct sw_flow_key *key,
734 				   u32 *n_mask_hit,
735 				   u32 *n_cache_hit,
736 				   u32 *index)
737 {
738 	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
739 	struct sw_flow *flow;
740 	struct sw_flow_mask *mask;
741 	int i;
742 
743 	if (likely(*index < ma->max)) {
744 		mask = rcu_dereference_ovsl(ma->masks[*index]);
745 		if (mask) {
746 			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
747 			if (flow) {
748 				u64_stats_update_begin(&stats->syncp);
749 				stats->usage_cntrs[*index]++;
750 				u64_stats_update_end(&stats->syncp);
751 				(*n_cache_hit)++;
752 				return flow;
753 			}
754 		}
755 	}
756 
757 	for (i = 0; i < ma->max; i++)  {
758 
759 		if (i == *index)
760 			continue;
761 
762 		mask = rcu_dereference_ovsl(ma->masks[i]);
763 		if (unlikely(!mask))
764 			break;
765 
766 		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
767 		if (flow) { /* Found */
768 			*index = i;
769 			u64_stats_update_begin(&stats->syncp);
770 			stats->usage_cntrs[*index]++;
771 			u64_stats_update_end(&stats->syncp);
772 			return flow;
773 		}
774 	}
775 
776 	return NULL;
777 }
778 
779 /*
780  * mask_cache maps a flow to a probable mask. This cache is not tightly
781  * coupled to the mask list, so updates to the mask list can result in
782  * inconsistent entries in the mask cache.
783  * This is a per-CPU cache and is divided into MC_HASH_SEGS segments.
784  * In case of a hash collision the entry is hashed into the next segment.
785  */
786 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
787 					  const struct sw_flow_key *key,
788 					  u32 skb_hash,
789 					  u32 *n_mask_hit,
790 					  u32 *n_cache_hit)
791 {
792 	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
793 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
794 	struct table_instance *ti = rcu_dereference(tbl->ti);
795 	struct mask_cache_entry *entries, *ce;
796 	struct sw_flow *flow;
797 	u32 hash;
798 	int seg;
799 
800 	*n_mask_hit = 0;
801 	*n_cache_hit = 0;
802 	if (unlikely(!skb_hash || mc->cache_size == 0)) {
803 		u32 mask_index = 0;
804 		u32 cache = 0;
805 
806 		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
807 				   &mask_index);
808 	}
809 
810 	/* Pre- and post-recirculation flows usually have the same skb_hash
811 	 * value. To avoid hash collisions, rehash the 'skb_hash' with the
812 	 * 'recirc_id'.  */
813 	if (key->recirc_id)
814 		skb_hash = jhash_1word(skb_hash, key->recirc_id);
815 
816 	ce = NULL;
817 	hash = skb_hash;
818 	entries = this_cpu_ptr(mc->mask_cache);
819 
820 	/* Find the cache entry 'ce' to operate on. */
821 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
822 		int index = hash & (mc->cache_size - 1);
823 		struct mask_cache_entry *e;
824 
825 		e = &entries[index];
826 		if (e->skb_hash == skb_hash) {
827 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
828 					   n_cache_hit, &e->mask_index);
829 			if (!flow)
830 				e->skb_hash = 0;
831 			return flow;
832 		}
833 
834 		if (!ce || e->skb_hash < ce->skb_hash)
835 			ce = e;  /* A better replacement cache candidate. */
836 
837 		hash >>= MC_HASH_SHIFT;
838 	}
839 
840 	/* Cache miss, do full lookup. */
841 	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
842 			   &ce->mask_index);
843 	if (flow)
844 		ce->skb_hash = skb_hash;
845 
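	/* This lookup was a mask cache miss, so don't report it as a cache hit. */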
846 	*n_cache_hit = 0;
847 	return flow;
848 }
849 
850 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
851 				    const struct sw_flow_key *key)
852 {
853 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
854 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
855 	u32 __always_unused n_mask_hit;
856 	u32 __always_unused n_cache_hit;
857 	struct sw_flow *flow;
858 	u32 index = 0;
859 
860 	/* This function gets called through the netlink interface and therefore
861 	 * is preemptible. However, the flow_lookup() function needs to be called
862 	 * with BH disabled due to per-CPU variables.
863 	 */
864 	local_bh_disable();
865 	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
866 	local_bh_enable();
867 	return flow;
868 }
869 
870 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
871 					  const struct sw_flow_match *match)
872 {
873 	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
874 	int i;
875 
876 	/* Always called under ovs-mutex. */
877 	for (i = 0; i < ma->max; i++) {
878 		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
879 		u32 __always_unused n_mask_hit;
880 		struct sw_flow_mask *mask;
881 		struct sw_flow *flow;
882 
883 		mask = ovsl_dereference(ma->masks[i]);
884 		if (!mask)
885 			continue;
886 
887 		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
888 		if (flow && ovs_identifier_is_key(&flow->id) &&
889 		    ovs_flow_cmp_unmasked_key(flow, match)) {
890 			return flow;
891 		}
892 	}
893 
894 	return NULL;
895 }
896 
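/* Flows installed with a unique flow identifier (UFID) are indexed in a
 * second table instance keyed by a jhash of the UFID bytes;
 * ovs_flow_tbl_lookup_ufid() below searches that instance.
 */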
897 static u32 ufid_hash(const struct sw_flow_id *sfid)
898 {
899 	return jhash(sfid->ufid, sfid->ufid_len, 0);
900 }
901 
902 static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
903 			      const struct sw_flow_id *sfid)
904 {
905 	if (flow->id.ufid_len != sfid->ufid_len)
906 		return false;
907 
908 	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
909 }
910 
911 bool ovs_flow_cmp(const struct sw_flow *flow,
912 		  const struct sw_flow_match *match)
913 {
914 	if (ovs_identifier_is_ufid(&flow->id))
915 		return flow_cmp_masked_key(flow, match->key, &match->range);
916 
917 	return ovs_flow_cmp_unmasked_key(flow, match);
918 }
919 
920 struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
921 					 const struct sw_flow_id *ufid)
922 {
923 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
924 	struct sw_flow *flow;
925 	struct hlist_head *head;
926 	u32 hash;
927 
928 	hash = ufid_hash(ufid);
929 	head = find_bucket(ti, hash);
930 	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
931 				 lockdep_ovsl_is_held()) {
932 		if (flow->ufid_table.hash == hash &&
933 		    ovs_flow_cmp_ufid(flow, ufid))
934 			return flow;
935 	}
936 	return NULL;
937 }
938 
939 int ovs_flow_tbl_num_masks(const struct flow_table *table)
940 {
941 	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

942 	return READ_ONCE(ma->count);
943 }
944 
945 u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
946 {
947 	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
948 
949 	return READ_ONCE(mc->cache_size);
950 }
951 
952 static struct table_instance *table_instance_expand(struct table_instance *ti,
953 						    bool ufid)
954 {
955 	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
956 }
957 
958 /* Must be called with OVS mutex held. */
959 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
960 {
961 	struct table_instance *ti = ovsl_dereference(table->ti);
962 	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
963 
964 	BUG_ON(table->count == 0);
965 	table_instance_flow_free(table, ti, ufid_ti, flow);
966 }
967 
968 static struct sw_flow_mask *mask_alloc(void)
969 {
970 	struct sw_flow_mask *mask;
971 
972 	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
973 	if (mask)
974 		mask->ref_count = 1;
975 
976 	return mask;
977 }
978 
979 static bool mask_equal(const struct sw_flow_mask *a,
980 		       const struct sw_flow_mask *b)
981 {
982 	const u8 *a_ = (const u8 *)&a->key + a->range.start;
983 	const u8 *b_ = (const u8 *)&b->key + b->range.start;
984 
985 	return  (a->range.end == b->range.end)
986 		&& (a->range.start == b->range.start)
987 		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
988 }
989 
990 static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
991 					   const struct sw_flow_mask *mask)
992 {
993 	struct mask_array *ma;
994 	int i;
995 
996 	ma = ovsl_dereference(tbl->mask_array);
997 	for (i = 0; i < ma->max; i++) {
998 		struct sw_flow_mask *t;
999 		t = ovsl_dereference(ma->masks[i]);
1000 
1001 		if (t && mask_equal(mask, t))
1002 			return t;
1003 	}
1004 
1005 	return NULL;
1006 }
1007 
1008 /* Add 'mask' into the mask list, if it is not already there. */
1009 static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
1010 			    const struct sw_flow_mask *new)
1011 {
1012 	struct sw_flow_mask *mask;
1013 
1014 	mask = flow_mask_find(tbl, new);
1015 	if (!mask) {
1016 		/* Allocate a new mask if none exists. */
1017 		mask = mask_alloc();
1018 		if (!mask)
1019 			return -ENOMEM;
1020 		mask->key = new->key;
1021 		mask->range = new->range;
1022 
1023 		/* Add mask to mask-list. */
1024 		if (tbl_mask_array_add_mask(tbl, mask)) {
1025 			kfree(mask);
1026 			return -ENOMEM;
1027 		}
1028 	} else {
1029 		BUG_ON(!mask->ref_count);
1030 		mask->ref_count++;
1031 	}
1032 
1033 	flow->mask = mask;
1034 	return 0;
1035 }
1036 
1037 /* Must be called with OVS mutex held. */
1038 static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
1039 {
1040 	struct table_instance *new_ti = NULL;
1041 	struct table_instance *ti;
1042 
1043 	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
1044 	ti = ovsl_dereference(table->ti);
1045 	table_instance_insert(ti, flow);
1046 	table->count++;
1047 
1048 	/* Expand table, if necessary, to make room. */
1049 	if (table->count > ti->n_buckets)
1050 		new_ti = table_instance_expand(ti, false);
1051 	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
1052 		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
1053 
1054 	if (new_ti) {
1055 		rcu_assign_pointer(table->ti, new_ti);
1056 		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1057 		table->last_rehash = jiffies;
1058 	}
1059 }
1060 
1061 /* Must be called with OVS mutex held. */
1062 static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
1063 {
1064 	struct table_instance *ti;
1065 
1066 	flow->ufid_table.hash = ufid_hash(&flow->id);
1067 	ti = ovsl_dereference(table->ufid_ti);
1068 	ufid_table_instance_insert(ti, flow);
1069 	table->ufid_count++;
1070 
1071 	/* Expand table, if necessary, to make room. */
1072 	if (table->ufid_count > ti->n_buckets) {
1073 		struct table_instance *new_ti;
1074 
1075 		new_ti = table_instance_expand(ti, true);
1076 		if (new_ti) {
1077 			rcu_assign_pointer(table->ufid_ti, new_ti);
1078 			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1079 		}
1080 	}
1081 }
1082 
1083 /* Must be called with OVS mutex held. */
1084 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
1085 			const struct sw_flow_mask *mask)
1086 {
1087 	int err;
1088 
1089 	err = flow_mask_insert(table, flow, mask);
1090 	if (err)
1091 		return err;
1092 	flow_key_insert(table, flow);
1093 	if (ovs_identifier_is_ufid(&flow->id))
1094 		flow_ufid_insert(table, flow);
1095 
1096 	return 0;
1097 }
1098 
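/* sort() comparator: order mask_count entries by usage counter, most used
 * first.
 */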
1099 static int compare_mask_and_count(const void *a, const void *b)
1100 {
1101 	const struct mask_count *mc_a = a;
1102 	const struct mask_count *mc_b = b;
1103 
1104 	return (s64)mc_b->counter - (s64)mc_a->counter;
1105 }
1106 
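/* Periodically re-order the mask array so the most frequently hit masks come
 * first.  flow_lookup() walks the masks in array order on a cache miss, so a
 * usage-sorted array shortens the average lookup.
 */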
1107 /* Must be called with OVS mutex held. */
1108 void ovs_flow_masks_rebalance(struct flow_table *table)
1109 {
1110 	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
1111 	struct mask_count *masks_and_count;
1112 	struct mask_array *new;
1113 	int masks_entries = 0;
1114 	int i;
1115 
1116 	/* Build array of all current entries with use counters. */
1117 	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
1118 					GFP_KERNEL);
1119 	if (!masks_and_count)
1120 		return;
1121 
1122 	for (i = 0; i < ma->max; i++) {
1123 		struct sw_flow_mask *mask;
1124 		int cpu;
1125 
1126 		mask = rcu_dereference_ovsl(ma->masks[i]);
1127 		if (unlikely(!mask))
1128 			break;
1129 
1130 		masks_and_count[i].index = i;
1131 		masks_and_count[i].counter = 0;
1132 
1133 		for_each_possible_cpu(cpu) {
1134 			struct mask_array_stats *stats;
1135 			unsigned int start;
1136 			u64 counter;
1137 
1138 			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
1139 			do {
1140 				start = u64_stats_fetch_begin(&stats->syncp);
1141 				counter = stats->usage_cntrs[i];
1142 			} while (u64_stats_fetch_retry(&stats->syncp, start));
1143 
1144 			masks_and_count[i].counter += counter;
1145 		}
1146 
1147 		/* Subtract the zero count value. */
1148 		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
1149 
1150 		/* Rather than calling tbl_mask_array_reset_counters()
1151 		 * below when no change is needed, do it inline here.
1152 		 */
1153 		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
1154 	}
1155 
1156 	if (i == 0)
1157 		goto free_mask_entries;
1158 
1159 	/* Sort the entries */
1160 	masks_entries = i;
1161 	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
1162 	     compare_mask_and_count, NULL);
1163 
1164 	/* If the order is the same, nothing to do... */
1165 	for (i = 0; i < masks_entries; i++) {
1166 		if (i != masks_and_count[i].index)
1167 			break;
1168 	}
1169 	if (i == masks_entries)
1170 		goto free_mask_entries;
1171 
1172 	/* Rebuild the new list in order of usage. */
1173 	new = tbl_mask_array_alloc(ma->max);
1174 	if (!new)
1175 		goto free_mask_entries;
1176 
1177 	for (i = 0; i < masks_entries; i++) {
1178 		int index = masks_and_count[i].index;
1179 
1180 		if (ovsl_dereference(ma->masks[index]))
1181 			new->masks[new->count++] = ma->masks[index];
1182 	}
1183 
1184 	rcu_assign_pointer(table->mask_array, new);
1185 	call_rcu(&ma->rcu, mask_array_rcu_cb);
1186 
1187 free_mask_entries:
1188 	kfree(masks_and_count);
1189 }
1190 
1191 /* Initializes the flow module.
1192  * Returns zero if successful or a negative error code. */
1193 int ovs_flow_init(void)
1194 {
1195 	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
1196 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
1197 
1198 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
1199 				       + (nr_cpu_ids
1200 					  * sizeof(struct sw_flow_stats *))
1201 				       + cpumask_size(),
1202 				       0, 0, NULL);
1203 	if (flow_cache == NULL)
1204 		return -ENOMEM;
1205 
1206 	flow_stats_cache
1207 		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
1208 				    0, SLAB_HWCACHE_ALIGN, NULL);
1209 	if (flow_stats_cache == NULL) {
1210 		kmem_cache_destroy(flow_cache);
1211 		flow_cache = NULL;
1212 		return -ENOMEM;
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 /* Uninitializes the flow module. */
1219 void ovs_flow_exit(void)
1220 {
1221 	kmem_cache_destroy(flow_stats_cache);
1222 	kmem_cache_destroy(flow_cache);
1223 }
1224