xref: /openbmc/linux/net/openvswitch/flow_table.c (revision b03afaa8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

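/* Initial table geometry: hash table instances start with TBL_MIN_BUCKETS
 * buckets and the mask array with MASK_ARRAY_SIZE_MIN slots, growing in
 * steps of the same size when full.  The active table instance is rehashed
 * with a fresh random seed roughly every REHASH_INTERVAL (ten minutes).
 */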
#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

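/* Per-CPU mask cache: MC_HASH_ENTRIES (256) entries per CPU.  A lookup
 * derives up to MC_HASH_SEGS (32 / MC_HASH_SHIFT = 4) candidate slots from
 * successive MC_HASH_SHIFT-bit slices of the skb hash.
 */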
#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized.  Otherwise
	 * the memory outside of 'mask->range' is left uninitialized.  This can
	 * be used as an optimization when further operations on 'dst' only use
	 * contents within 'mask->range'.
	 */
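	/* The masking below runs a long word at a time; ovs_flow_init()
	 * checks that struct sw_flow_key is a multiple of sizeof(long) in
	 * both size and alignment, and the flow mask code keeps 'range'
	 * long-aligned, so no partial word is ever touched.
	 */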
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

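/* Bucket counts are always a power of two (TBL_MIN_BUCKETS initially,
 * doubled by table_instance_expand()), so find_bucket() can mask the hash
 * with (n_buckets - 1) instead of taking a modulo.
 */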
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per-CPU counters are not atomic we cannot reset them from
	 * another CPU.  To still get an approximately zero-based counter we
	 * store the value at reset time and subtract it later when
	 * processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

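/* A mask_array is allocated as a single block: the struct itself (whose
 * trailing masks[] holds 'size' mask pointers) immediately followed by the
 * masks_usage_zero_cntr[] array of 'size' u64s.  The per-CPU usage counters
 * live in a separate percpu allocation.
 */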
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					      MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

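/* The mask array is kept densely packed at the front: deleting a mask moves
 * the last entry into the vacated slot, so readers in flow_lookup() can stop
 * at the first NULL pointer.
 */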
static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow,
				     bool count)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	if (count)
		table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);

		if (count)
			table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

static void table_instance_destroy(struct flow_table *table,
				   struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow, false);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or the
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	free_percpu(table->mask_cache);
	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
	table_instance_destroy(table, ti, ufid_ti, false);
}

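/* Iterator for flow dumps: '*bucket' is the current bucket index and '*last'
 * the 1-based position within that bucket.  Both are updated so that the
 * next call resumes right after the flow returned here.
 */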
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

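/* Copying flows into a new table instance uses the other node_ver, so each
 * flow is linked into the old and the new hash table at the same time.  The
 * old instance sets keep_flows so that destroying it does not free the flows
 * it still references.
 */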
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* The number of hash bytes is a multiple of sizeof(u32). */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

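/* Key comparisons run a long word at a time over [key_start, key_end); the
 * flow mask code keeps key ranges long-aligned, so the loop below never
 * reads a partial word.
 */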
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table.  It starts with the mask
 * at the index passed in via '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

/*
 * mask_cache maps a flow to its probable mask.  The cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries behind.  The cache is per CPU and is divided into MC_HASH_SEGS
 * segments; on a hash collision the entry is hashed into the next segment.
 */
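/* Lookup strategy: if the skb hash is usable, probe up to MC_HASH_SEGS cache
 * slots for an entry recorded for this skb_hash and start the mask scan at
 * the cached mask index.  On a miss, the probed slot with the smallest
 * skb_hash is reused as the replacement victim and updated if the full
 * lookup succeeds.
 */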
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 mask_index = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value.  To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.  */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(tbl->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow, true);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
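/* Inserting a key may grow the table: when the flow count exceeds the number
 * of buckets the instance is doubled, and independently of that the table is
 * rehashed with a new seed once REHASH_INTERVAL has passed since the last
 * rehash.
 */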
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
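/* Rebalancing sums the per-CPU hit counters for every mask, sorts the masks
 * by usage (compare_mask_and_count sorts in descending order) and, if the
 * order changed, publishes a new mask_array in that order so flow_lookup()
 * tries the most frequently hit masks first.
 */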
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries by usage. */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		new->masks[new->count++] =
			rcu_dereference_ovsl(ma->masks[index]);
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
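/* Each sw_flow allocated from flow_cache carries a trailing array of
 * nr_cpu_ids stats pointers; the per-CPU sw_flow_stats nodes themselves come
 * from flow_stats_cache and are allocated on demand as CPUs touch the flow
 * (only the CPU 0 node is preallocated in ovs_flow_alloc()).
 */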
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}