xref: /openbmc/linux/net/openvswitch/flow_table.c (revision 04b7d136d015f220b1003e6c573834658d507a31)
1c9422999SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2e6445719SPravin B Shelar /*
39b996e54SPravin B Shelar  * Copyright (c) 2007-2014 Nicira, Inc.
4e6445719SPravin B Shelar  */
5e6445719SPravin B Shelar 
6e6445719SPravin B Shelar #include "flow.h"
7e6445719SPravin B Shelar #include "datapath.h"
834ae932aSThomas Graf #include "flow_netlink.h"
9e6445719SPravin B Shelar #include <linux/uaccess.h>
10e6445719SPravin B Shelar #include <linux/netdevice.h>
11e6445719SPravin B Shelar #include <linux/etherdevice.h>
12e6445719SPravin B Shelar #include <linux/if_ether.h>
13e6445719SPravin B Shelar #include <linux/if_vlan.h>
14e6445719SPravin B Shelar #include <net/llc_pdu.h>
15e6445719SPravin B Shelar #include <linux/kernel.h>
1687545899SDaniel Borkmann #include <linux/jhash.h>
17e6445719SPravin B Shelar #include <linux/jiffies.h>
18e6445719SPravin B Shelar #include <linux/llc.h>
19e6445719SPravin B Shelar #include <linux/module.h>
20e6445719SPravin B Shelar #include <linux/in.h>
21e6445719SPravin B Shelar #include <linux/rcupdate.h>
22db74a333SThadeu Lima de Souza Cascardo #include <linux/cpumask.h>
23e6445719SPravin B Shelar #include <linux/if_arp.h>
24e6445719SPravin B Shelar #include <linux/ip.h>
25e6445719SPravin B Shelar #include <linux/ipv6.h>
26e6445719SPravin B Shelar #include <linux/sctp.h>
27e6445719SPravin B Shelar #include <linux/tcp.h>
28e6445719SPravin B Shelar #include <linux/udp.h>
29e6445719SPravin B Shelar #include <linux/icmp.h>
30e6445719SPravin B Shelar #include <linux/icmpv6.h>
31e6445719SPravin B Shelar #include <linux/rculist.h>
32e6445719SPravin B Shelar #include <net/ip.h>
33e6445719SPravin B Shelar #include <net/ipv6.h>
34e6445719SPravin B Shelar #include <net/ndisc.h>
35e6445719SPravin B Shelar 
36b637e498SPravin B Shelar #define TBL_MIN_BUCKETS		1024
37b637e498SPravin B Shelar #define REHASH_INTERVAL		(10 * 60 * HZ)
38b637e498SPravin B Shelar 
39*04b7d136STonghao Zhang #define MC_HASH_SHIFT		8
40*04b7d136STonghao Zhang #define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
41*04b7d136STonghao Zhang #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
42*04b7d136STonghao Zhang 
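The three MC_HASH_* macros above size the per-CPU mask cache introduced in this revision: a 256-entry table that ovs_flow_tbl_lookup_stats() probes up to four times, once per 8-bit segment of the 32-bit skb hash. A minimal userspace sketch of that indexing scheme (illustrative only, not part of this file):

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

int main(void)
{
	uint32_t hash = 0xdeadbeef;	/* stand-in for skb->hash */
	unsigned int seg;

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		/* Each probe uses the low MC_HASH_SHIFT bits of the hash,
		 * then shifts them away so the next segment indexes a
		 * different cache slot.
		 */
		printf("segment %u -> cache index %u\n",
		       seg, hash & (MC_HASH_ENTRIES - 1));
		hash >>= MC_HASH_SHIFT;
	}
	return 0;
}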
43e6445719SPravin B Shelar static struct kmem_cache *flow_cache;
4463e7959cSJarno Rajahalme struct kmem_cache *flow_stats_cache __read_mostly;
45e6445719SPravin B Shelar 
46e6445719SPravin B Shelar static u16 range_n_bytes(const struct sw_flow_key_range *range)
47e6445719SPravin B Shelar {
48e6445719SPravin B Shelar 	return range->end - range->start;
49e6445719SPravin B Shelar }
50e6445719SPravin B Shelar 
51e6445719SPravin B Shelar void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
52ae5f2fb1SJesse Gross 		       bool full, const struct sw_flow_mask *mask)
53e6445719SPravin B Shelar {
54ae5f2fb1SJesse Gross 	int start = full ? 0 : mask->range.start;
55ae5f2fb1SJesse Gross 	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
56ae5f2fb1SJesse Gross 	const long *m = (const long *)((const u8 *)&mask->key + start);
57ae5f2fb1SJesse Gross 	const long *s = (const long *)((const u8 *)src + start);
58ae5f2fb1SJesse Gross 	long *d = (long *)((u8 *)dst + start);
59e6445719SPravin B Shelar 	int i;
60e6445719SPravin B Shelar 
61ae5f2fb1SJesse Gross 	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
62ae5f2fb1SJesse Gross 	 * if 'full' is false the memory outside of the 'mask->range' is left
63ae5f2fb1SJesse Gross 	 * uninitialized. This can be used as an optimization when further
64ae5f2fb1SJesse Gross 	 * operations on 'dst' only use contents within 'mask->range'.
65e6445719SPravin B Shelar 	 */
66ae5f2fb1SJesse Gross 	for (i = 0; i < len; i += sizeof(long))
67e6445719SPravin B Shelar 		*d++ = *s++ & *m++;
68e6445719SPravin B Shelar }
69e6445719SPravin B Shelar 
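The loop above depends on struct sw_flow_key being long-aligned and long-padded (ovs_flow_init() asserts this at the bottom of the file), so masking can run one machine word at a time over only the bytes covered by mask->range. A self-contained userspace sketch of the same word-wise masking, using a hypothetical two-field key (illustrative only, not part of this file):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical two-field key, long-aligned like struct sw_flow_key. */
struct toy_key {
	unsigned long ip;
	unsigned long port;
};

/* AND 'src' with 'mask' one machine word at a time, as the loop in
 * ovs_flow_mask_key() does over the bytes in mask->range.
 */
static void toy_mask_key(struct toy_key *dst, const struct toy_key *src,
			 const struct toy_key *mask)
{
	const long *m = (const long *)mask;
	const long *s = (const long *)src;
	long *d = (long *)dst;
	size_t i;

	for (i = 0; i < sizeof(*dst); i += sizeof(long))
		*d++ = *s++ & *m++;
}

int main(void)
{
	struct toy_key key = { .ip = 0xc0a80001UL, .port = 8080 };
	struct toy_key mask = { .ip = ~0UL, .port = 0 };	/* wildcard the port */
	struct toy_key masked;

	toy_mask_key(&masked, &key, &mask);
	printf("ip=%lx port=%lu\n", masked.ip, masked.port);	/* port is masked to 0 */
	return 0;
}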
7023dabf88SJarno Rajahalme struct sw_flow *ovs_flow_alloc(void)
71e6445719SPravin B Shelar {
72e6445719SPravin B Shelar 	struct sw_flow *flow;
73aef833c5SPablo Neira Ayuso 	struct sw_flow_stats *stats;
74e6445719SPravin B Shelar 
75db74a333SThadeu Lima de Souza Cascardo 	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
76e6445719SPravin B Shelar 	if (!flow)
77e6445719SPravin B Shelar 		return ERR_PTR(-ENOMEM);
78e6445719SPravin B Shelar 
79db74a333SThadeu Lima de Souza Cascardo 	flow->stats_last_writer = -1;
80e6445719SPravin B Shelar 
8163e7959cSJarno Rajahalme 	/* Initialize the default stat node. */
8263e7959cSJarno Rajahalme 	stats = kmem_cache_alloc_node(flow_stats_cache,
83598c12d0SKonstantin Khlebnikov 				      GFP_KERNEL | __GFP_ZERO,
84598c12d0SKonstantin Khlebnikov 				      node_online(0) ? 0 : NUMA_NO_NODE);
8563e7959cSJarno Rajahalme 	if (!stats)
86e298e505SPravin B Shelar 		goto err;
87e298e505SPravin B Shelar 
8863e7959cSJarno Rajahalme 	spin_lock_init(&stats->lock);
89e298e505SPravin B Shelar 
9063e7959cSJarno Rajahalme 	RCU_INIT_POINTER(flow->stats[0], stats);
9163e7959cSJarno Rajahalme 
92c4b2bf6bSTonghao Zhang 	cpumask_set_cpu(0, &flow->cpu_used_mask);
93c4b2bf6bSTonghao Zhang 
94e6445719SPravin B Shelar 	return flow;
95e298e505SPravin B Shelar err:
96ece37c87SWei Yongjun 	kmem_cache_free(flow_cache, flow);
97e298e505SPravin B Shelar 	return ERR_PTR(-ENOMEM);
98e6445719SPravin B Shelar }
99e6445719SPravin B Shelar 
10012eb18f7SThomas Graf int ovs_flow_tbl_count(const struct flow_table *table)
101b637e498SPravin B Shelar {
102b637e498SPravin B Shelar 	return table->count;
103b637e498SPravin B Shelar }
104b637e498SPravin B Shelar 
105e6445719SPravin B Shelar static void flow_free(struct sw_flow *flow)
106e6445719SPravin B Shelar {
107db74a333SThadeu Lima de Souza Cascardo 	int cpu;
10863e7959cSJarno Rajahalme 
10974ed7ab9SJoe Stringer 	if (ovs_identifier_is_key(&flow->id))
11074ed7ab9SJoe Stringer 		kfree(flow->id.unmasked_key);
11134ae932aSThomas Graf 	if (flow->sf_acts)
11234ae932aSThomas Graf 		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
113db74a333SThadeu Lima de Souza Cascardo 	/* We open code this to make sure cpu 0 is always considered */
114c4b2bf6bSTonghao Zhang 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
115db74a333SThadeu Lima de Souza Cascardo 		if (flow->stats[cpu])
11663e7959cSJarno Rajahalme 			kmem_cache_free(flow_stats_cache,
117aef833c5SPablo Neira Ayuso 					(struct sw_flow_stats __force *)flow->stats[cpu]);
118e6445719SPravin B Shelar 	kmem_cache_free(flow_cache, flow);
119e6445719SPravin B Shelar }
120e6445719SPravin B Shelar 
121e6445719SPravin B Shelar static void rcu_free_flow_callback(struct rcu_head *rcu)
122e6445719SPravin B Shelar {
123e6445719SPravin B Shelar 	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
124e6445719SPravin B Shelar 
125e6445719SPravin B Shelar 	flow_free(flow);
126e6445719SPravin B Shelar }
127e6445719SPravin B Shelar 
128e80857ccSAndy Zhou void ovs_flow_free(struct sw_flow *flow, bool deferred)
129618ed0c8SPravin B Shelar {
130e80857ccSAndy Zhou 	if (!flow)
131618ed0c8SPravin B Shelar 		return;
132618ed0c8SPravin B Shelar 
133e6445719SPravin B Shelar 	if (deferred)
134e6445719SPravin B Shelar 		call_rcu(&flow->rcu, rcu_free_flow_callback);
135e6445719SPravin B Shelar 	else
136e6445719SPravin B Shelar 		flow_free(flow);
137e6445719SPravin B Shelar }
138e6445719SPravin B Shelar 
139b637e498SPravin B Shelar static void __table_instance_destroy(struct table_instance *ti)
140e6445719SPravin B Shelar {
141ee9c5e67SKent Overstreet 	kvfree(ti->buckets);
142b637e498SPravin B Shelar 	kfree(ti);
143e6445719SPravin B Shelar }
144e6445719SPravin B Shelar 
145b637e498SPravin B Shelar static struct table_instance *table_instance_alloc(int new_size)
146e6445719SPravin B Shelar {
147b637e498SPravin B Shelar 	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
148ee9c5e67SKent Overstreet 	int i;
149e6445719SPravin B Shelar 
150b637e498SPravin B Shelar 	if (!ti)
151e6445719SPravin B Shelar 		return NULL;
152e6445719SPravin B Shelar 
153ee9c5e67SKent Overstreet 	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
154ee9c5e67SKent Overstreet 				     GFP_KERNEL);
155b637e498SPravin B Shelar 	if (!ti->buckets) {
156b637e498SPravin B Shelar 		kfree(ti);
157e6445719SPravin B Shelar 		return NULL;
158e6445719SPravin B Shelar 	}
159ee9c5e67SKent Overstreet 
160ee9c5e67SKent Overstreet 	for (i = 0; i < new_size; i++)
161ee9c5e67SKent Overstreet 		INIT_HLIST_HEAD(&ti->buckets[i]);
162ee9c5e67SKent Overstreet 
163b637e498SPravin B Shelar 	ti->n_buckets = new_size;
164b637e498SPravin B Shelar 	ti->node_ver = 0;
165b637e498SPravin B Shelar 	ti->keep_flows = false;
166b637e498SPravin B Shelar 	get_random_bytes(&ti->hash_seed, sizeof(u32));
167b637e498SPravin B Shelar 
168b637e498SPravin B Shelar 	return ti;
169b637e498SPravin B Shelar }
170b637e498SPravin B Shelar 
171b637e498SPravin B Shelar int ovs_flow_tbl_init(struct flow_table *table)
172b637e498SPravin B Shelar {
17374ed7ab9SJoe Stringer 	struct table_instance *ti, *ufid_ti;
174b637e498SPravin B Shelar 
175*04b7d136STonghao Zhang 	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
176*04b7d136STonghao Zhang 					   MC_HASH_ENTRIES,
177*04b7d136STonghao Zhang 					   __alignof__(struct mask_cache_entry));
178*04b7d136STonghao Zhang 	if (!table->mask_cache)
179b637e498SPravin B Shelar 		return -ENOMEM;
180b637e498SPravin B Shelar 
181*04b7d136STonghao Zhang 	ti = table_instance_alloc(TBL_MIN_BUCKETS);
182*04b7d136STonghao Zhang 	if (!ti)
183*04b7d136STonghao Zhang 		goto free_mask_cache;
184*04b7d136STonghao Zhang 
18574ed7ab9SJoe Stringer 	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
18674ed7ab9SJoe Stringer 	if (!ufid_ti)
18774ed7ab9SJoe Stringer 		goto free_ti;
18874ed7ab9SJoe Stringer 
189b637e498SPravin B Shelar 	rcu_assign_pointer(table->ti, ti);
19074ed7ab9SJoe Stringer 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
191b637e498SPravin B Shelar 	INIT_LIST_HEAD(&table->mask_list);
192b637e498SPravin B Shelar 	table->last_rehash = jiffies;
193e6445719SPravin B Shelar 	table->count = 0;
19474ed7ab9SJoe Stringer 	table->ufid_count = 0;
195b637e498SPravin B Shelar 	return 0;
19674ed7ab9SJoe Stringer 
19774ed7ab9SJoe Stringer free_ti:
19874ed7ab9SJoe Stringer 	__table_instance_destroy(ti);
199*04b7d136STonghao Zhang free_mask_cache:
200*04b7d136STonghao Zhang 	free_percpu(table->mask_cache);
20174ed7ab9SJoe Stringer 	return -ENOMEM;
202e6445719SPravin B Shelar }
203e6445719SPravin B Shelar 
204e6445719SPravin B Shelar static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
205e6445719SPravin B Shelar {
206b637e498SPravin B Shelar 	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
207e6445719SPravin B Shelar 
208b637e498SPravin B Shelar 	__table_instance_destroy(ti);
209b637e498SPravin B Shelar }
210b637e498SPravin B Shelar 
21174ed7ab9SJoe Stringer static void table_instance_destroy(struct table_instance *ti,
21274ed7ab9SJoe Stringer 				   struct table_instance *ufid_ti,
21374ed7ab9SJoe Stringer 				   bool deferred)
214b637e498SPravin B Shelar {
215e80857ccSAndy Zhou 	int i;
216e80857ccSAndy Zhou 
217b637e498SPravin B Shelar 	if (!ti)
218b637e498SPravin B Shelar 		return;
219b637e498SPravin B Shelar 
22074ed7ab9SJoe Stringer 	BUG_ON(!ufid_ti);
221e80857ccSAndy Zhou 	if (ti->keep_flows)
222e80857ccSAndy Zhou 		goto skip_flows;
223e80857ccSAndy Zhou 
224e80857ccSAndy Zhou 	for (i = 0; i < ti->n_buckets; i++) {
225e80857ccSAndy Zhou 		struct sw_flow *flow;
226ee9c5e67SKent Overstreet 		struct hlist_head *head = &ti->buckets[i];
227e80857ccSAndy Zhou 		struct hlist_node *n;
228e80857ccSAndy Zhou 		int ver = ti->node_ver;
22974ed7ab9SJoe Stringer 		int ufid_ver = ufid_ti->node_ver;
230e80857ccSAndy Zhou 
23174ed7ab9SJoe Stringer 		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
23274ed7ab9SJoe Stringer 			hlist_del_rcu(&flow->flow_table.node[ver]);
23374ed7ab9SJoe Stringer 			if (ovs_identifier_is_ufid(&flow->id))
23474ed7ab9SJoe Stringer 				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
235e80857ccSAndy Zhou 			ovs_flow_free(flow, deferred);
236e80857ccSAndy Zhou 		}
237e80857ccSAndy Zhou 	}
238e80857ccSAndy Zhou 
239e80857ccSAndy Zhou skip_flows:
24074ed7ab9SJoe Stringer 	if (deferred) {
241b637e498SPravin B Shelar 		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
24274ed7ab9SJoe Stringer 		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
24374ed7ab9SJoe Stringer 	} else {
244b637e498SPravin B Shelar 		__table_instance_destroy(ti);
24574ed7ab9SJoe Stringer 		__table_instance_destroy(ufid_ti);
24674ed7ab9SJoe Stringer 	}
247e6445719SPravin B Shelar }
248e6445719SPravin B Shelar 
2499b996e54SPravin B Shelar /* No need for locking; this function is called from an RCU callback or
2509b996e54SPravin B Shelar  * the error path.
2519b996e54SPravin B Shelar  */
2529b996e54SPravin B Shelar void ovs_flow_tbl_destroy(struct flow_table *table)
253e6445719SPravin B Shelar {
2549b996e54SPravin B Shelar 	struct table_instance *ti = rcu_dereference_raw(table->ti);
25574ed7ab9SJoe Stringer 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
256e6445719SPravin B Shelar 
257*04b7d136STonghao Zhang 	free_percpu(table->mask_cache);
25874ed7ab9SJoe Stringer 	table_instance_destroy(ti, ufid_ti, false);
259e6445719SPravin B Shelar }
260e6445719SPravin B Shelar 
261b637e498SPravin B Shelar struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
262e6445719SPravin B Shelar 				       u32 *bucket, u32 *last)
263e6445719SPravin B Shelar {
264e6445719SPravin B Shelar 	struct sw_flow *flow;
265e6445719SPravin B Shelar 	struct hlist_head *head;
266e6445719SPravin B Shelar 	int ver;
267e6445719SPravin B Shelar 	int i;
268e6445719SPravin B Shelar 
269b637e498SPravin B Shelar 	ver = ti->node_ver;
270b637e498SPravin B Shelar 	while (*bucket < ti->n_buckets) {
271e6445719SPravin B Shelar 		i = 0;
272ee9c5e67SKent Overstreet 		head = &ti->buckets[*bucket];
27374ed7ab9SJoe Stringer 		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
274e6445719SPravin B Shelar 			if (i < *last) {
275e6445719SPravin B Shelar 				i++;
276e6445719SPravin B Shelar 				continue;
277e6445719SPravin B Shelar 			}
278e6445719SPravin B Shelar 			*last = i + 1;
279e6445719SPravin B Shelar 			return flow;
280e6445719SPravin B Shelar 		}
281e6445719SPravin B Shelar 		(*bucket)++;
282e6445719SPravin B Shelar 		*last = 0;
283e6445719SPravin B Shelar 	}
284e6445719SPravin B Shelar 
285e6445719SPravin B Shelar 	return NULL;
286e6445719SPravin B Shelar }
287e6445719SPravin B Shelar 
288b637e498SPravin B Shelar static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
289e6445719SPravin B Shelar {
290b637e498SPravin B Shelar 	hash = jhash_1word(hash, ti->hash_seed);
291ee9c5e67SKent Overstreet 	return &ti->buckets[hash & (ti->n_buckets - 1)];
292e6445719SPravin B Shelar }
293e6445719SPravin B Shelar 
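find_bucket() mixes the flow hash with the per-table-instance random seed and then masks it, which only selects a valid bucket because n_buckets is always a power of two (TBL_MIN_BUCKETS is 1024 and table_instance_expand() doubles it). A small sketch of that power-of-two masking (illustrative only; mix() is a trivial stand-in for jhash_1word()):

#include <stdint.h>
#include <stdio.h>

/* Trivial stand-in for jhash_1word(); any 32-bit mixing function will do
 * for this illustration.
 */
static uint32_t mix(uint32_t hash, uint32_t seed)
{
	hash ^= seed;
	hash *= 0x9e3779b1u;
	return hash ^ (hash >> 16);
}

int main(void)
{
	uint32_t n_buckets = 1024;	/* TBL_MIN_BUCKETS; always a power of two */
	uint32_t seed = 0x12345678;	/* per-instance hash_seed */
	uint32_t hash = 0xabcdef01;	/* flow hash */

	/* For powers of two, hash & (n_buckets - 1) == hash % n_buckets. */
	printf("bucket %u of %u\n", mix(hash, seed) & (n_buckets - 1), n_buckets);
	return 0;
}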
29474ed7ab9SJoe Stringer static void table_instance_insert(struct table_instance *ti,
29574ed7ab9SJoe Stringer 				  struct sw_flow *flow)
296e6445719SPravin B Shelar {
297e6445719SPravin B Shelar 	struct hlist_head *head;
298e6445719SPravin B Shelar 
29974ed7ab9SJoe Stringer 	head = find_bucket(ti, flow->flow_table.hash);
30074ed7ab9SJoe Stringer 	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
30174ed7ab9SJoe Stringer }
30274ed7ab9SJoe Stringer 
30374ed7ab9SJoe Stringer static void ufid_table_instance_insert(struct table_instance *ti,
30474ed7ab9SJoe Stringer 				       struct sw_flow *flow)
30574ed7ab9SJoe Stringer {
30674ed7ab9SJoe Stringer 	struct hlist_head *head;
30774ed7ab9SJoe Stringer 
30874ed7ab9SJoe Stringer 	head = find_bucket(ti, flow->ufid_table.hash);
30974ed7ab9SJoe Stringer 	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
310e6445719SPravin B Shelar }
311e6445719SPravin B Shelar 
312b637e498SPravin B Shelar static void flow_table_copy_flows(struct table_instance *old,
31374ed7ab9SJoe Stringer 				  struct table_instance *new, bool ufid)
314e6445719SPravin B Shelar {
315e6445719SPravin B Shelar 	int old_ver;
316e6445719SPravin B Shelar 	int i;
317e6445719SPravin B Shelar 
318e6445719SPravin B Shelar 	old_ver = old->node_ver;
319e6445719SPravin B Shelar 	new->node_ver = !old_ver;
320e6445719SPravin B Shelar 
321e6445719SPravin B Shelar 	/* Insert in new table. */
322e6445719SPravin B Shelar 	for (i = 0; i < old->n_buckets; i++) {
323e6445719SPravin B Shelar 		struct sw_flow *flow;
324ee9c5e67SKent Overstreet 		struct hlist_head *head = &old->buckets[i];
325e6445719SPravin B Shelar 
32674ed7ab9SJoe Stringer 		if (ufid)
32774ed7ab9SJoe Stringer 			hlist_for_each_entry(flow, head,
32874ed7ab9SJoe Stringer 					     ufid_table.node[old_ver])
32974ed7ab9SJoe Stringer 				ufid_table_instance_insert(new, flow);
33074ed7ab9SJoe Stringer 		else
33174ed7ab9SJoe Stringer 			hlist_for_each_entry(flow, head,
33274ed7ab9SJoe Stringer 					     flow_table.node[old_ver])
333b637e498SPravin B Shelar 				table_instance_insert(new, flow);
334e6445719SPravin B Shelar 	}
335e6445719SPravin B Shelar 
336e6445719SPravin B Shelar 	old->keep_flows = true;
337e6445719SPravin B Shelar }
338e6445719SPravin B Shelar 
339b637e498SPravin B Shelar static struct table_instance *table_instance_rehash(struct table_instance *ti,
34074ed7ab9SJoe Stringer 						    int n_buckets, bool ufid)
341e6445719SPravin B Shelar {
342b637e498SPravin B Shelar 	struct table_instance *new_ti;
343e6445719SPravin B Shelar 
344b637e498SPravin B Shelar 	new_ti = table_instance_alloc(n_buckets);
345b637e498SPravin B Shelar 	if (!new_ti)
346618ed0c8SPravin B Shelar 		return NULL;
347e6445719SPravin B Shelar 
34874ed7ab9SJoe Stringer 	flow_table_copy_flows(ti, new_ti, ufid);
349e6445719SPravin B Shelar 
350b637e498SPravin B Shelar 	return new_ti;
351e6445719SPravin B Shelar }
352e6445719SPravin B Shelar 
353b637e498SPravin B Shelar int ovs_flow_tbl_flush(struct flow_table *flow_table)
354e6445719SPravin B Shelar {
35574ed7ab9SJoe Stringer 	struct table_instance *old_ti, *new_ti;
35674ed7ab9SJoe Stringer 	struct table_instance *old_ufid_ti, *new_ufid_ti;
357e6445719SPravin B Shelar 
358b637e498SPravin B Shelar 	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
359b637e498SPravin B Shelar 	if (!new_ti)
360b637e498SPravin B Shelar 		return -ENOMEM;
36174ed7ab9SJoe Stringer 	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
36274ed7ab9SJoe Stringer 	if (!new_ufid_ti)
36374ed7ab9SJoe Stringer 		goto err_free_ti;
36474ed7ab9SJoe Stringer 
36574ed7ab9SJoe Stringer 	old_ti = ovsl_dereference(flow_table->ti);
36674ed7ab9SJoe Stringer 	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
367b637e498SPravin B Shelar 
368b637e498SPravin B Shelar 	rcu_assign_pointer(flow_table->ti, new_ti);
36974ed7ab9SJoe Stringer 	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
370b637e498SPravin B Shelar 	flow_table->last_rehash = jiffies;
371b637e498SPravin B Shelar 	flow_table->count = 0;
37274ed7ab9SJoe Stringer 	flow_table->ufid_count = 0;
373b637e498SPravin B Shelar 
37474ed7ab9SJoe Stringer 	table_instance_destroy(old_ti, old_ufid_ti, true);
375b637e498SPravin B Shelar 	return 0;
37674ed7ab9SJoe Stringer 
37774ed7ab9SJoe Stringer err_free_ti:
37874ed7ab9SJoe Stringer 	__table_instance_destroy(new_ti);
37974ed7ab9SJoe Stringer 	return -ENOMEM;
380e6445719SPravin B Shelar }
381e6445719SPravin B Shelar 
382272c2cf8SJoe Stringer static u32 flow_hash(const struct sw_flow_key *key,
383272c2cf8SJoe Stringer 		     const struct sw_flow_key_range *range)
384e6445719SPravin B Shelar {
385272c2cf8SJoe Stringer 	int key_start = range->start;
386272c2cf8SJoe Stringer 	int key_end = range->end;
3877085130bSDaniele Di Proietto 	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
388e6445719SPravin B Shelar 	int hash_u32s = (key_end - key_start) >> 2;
389e6445719SPravin B Shelar 
390e6445719SPravin B Shelar 	/* Make sure the number of hash bytes is a multiple of sizeof(u32). */
391e6445719SPravin B Shelar 	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
392e6445719SPravin B Shelar 
39387545899SDaniel Borkmann 	return jhash2(hash_key, hash_u32s, 0);
394e6445719SPravin B Shelar }
395e6445719SPravin B Shelar 
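As a worked example (hypothetical numbers): a mask whose range covers bytes 24 through 56 of the key gives key_start = 24, key_end = 56, and hash_u32s = (56 - 24) >> 2 = 8, so only those eight 32-bit words are fed to jhash2(); fields outside the mask's range can never influence which bucket a flow hashes to.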
396e6445719SPravin B Shelar static int flow_key_start(const struct sw_flow_key *key)
397e6445719SPravin B Shelar {
39800a93babSJiri Benc 	if (key->tun_proto)
399e6445719SPravin B Shelar 		return 0;
400e6445719SPravin B Shelar 	else
401e6445719SPravin B Shelar 		return rounddown(offsetof(struct sw_flow_key, phy),
402e6445719SPravin B Shelar 					  sizeof(long));
403e6445719SPravin B Shelar }
404e6445719SPravin B Shelar 
405e6445719SPravin B Shelar static bool cmp_key(const struct sw_flow_key *key1,
406e6445719SPravin B Shelar 		    const struct sw_flow_key *key2,
407e6445719SPravin B Shelar 		    int key_start, int key_end)
408e6445719SPravin B Shelar {
4097085130bSDaniele Di Proietto 	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
4107085130bSDaniele Di Proietto 	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
411e6445719SPravin B Shelar 	long diffs = 0;
412e6445719SPravin B Shelar 	int i;
413e6445719SPravin B Shelar 
414e6445719SPravin B Shelar 	for (i = key_start; i < key_end;  i += sizeof(long))
415e6445719SPravin B Shelar 		diffs |= *cp1++ ^ *cp2++;
416e6445719SPravin B Shelar 
417e6445719SPravin B Shelar 	return diffs == 0;
418e6445719SPravin B Shelar }
419e6445719SPravin B Shelar 
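cmp_key() compares the two keys one long at a time and ORs the XOR of every pair of words into diffs, so the loop body has no early-exit branch; any differing bit anywhere in the range leaves diffs non-zero. A standalone sketch of the same idea (illustrative only, not part of this file):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Compare two long-aligned buffers over [start, end), accumulating all
 * differences instead of returning at the first mismatch.
 */
static bool range_equal(const void *a, const void *b, size_t start, size_t end)
{
	const long *pa = (const long *)((const unsigned char *)a + start);
	const long *pb = (const long *)((const unsigned char *)b + start);
	long diffs = 0;
	size_t i;

	for (i = start; i < end; i += sizeof(long))
		diffs |= *pa++ ^ *pb++;

	return diffs == 0;
}

int main(void)
{
	long x[4] = { 1, 2, 3, 4 };
	long y[4] = { 1, 2, 7, 4 };

	printf("%d %d\n",
	       range_equal(x, y, 0, 2 * sizeof(long)),	/* 1: first two longs match */
	       range_equal(x, y, 0, 4 * sizeof(long)));	/* 0: third long differs */
	return 0;
}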
420e6445719SPravin B Shelar static bool flow_cmp_masked_key(const struct sw_flow *flow,
421e6445719SPravin B Shelar 				const struct sw_flow_key *key,
422272c2cf8SJoe Stringer 				const struct sw_flow_key_range *range)
423e6445719SPravin B Shelar {
424272c2cf8SJoe Stringer 	return cmp_key(&flow->key, key, range->start, range->end);
425e6445719SPravin B Shelar }
426e6445719SPravin B Shelar 
42774ed7ab9SJoe Stringer static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
42812eb18f7SThomas Graf 				      const struct sw_flow_match *match)
429e6445719SPravin B Shelar {
430e6445719SPravin B Shelar 	struct sw_flow_key *key = match->key;
431e6445719SPravin B Shelar 	int key_start = flow_key_start(key);
432e6445719SPravin B Shelar 	int key_end = match->range.end;
433e6445719SPravin B Shelar 
43474ed7ab9SJoe Stringer 	BUG_ON(ovs_identifier_is_ufid(&flow->id));
43574ed7ab9SJoe Stringer 	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
436e6445719SPravin B Shelar }
437e6445719SPravin B Shelar 
438b637e498SPravin B Shelar static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
439e6445719SPravin B Shelar 					  const struct sw_flow_key *unmasked,
440*04b7d136STonghao Zhang 					  const struct sw_flow_mask *mask,
441*04b7d136STonghao Zhang 					  u32 *n_mask_hit)
442e6445719SPravin B Shelar {
443e6445719SPravin B Shelar 	struct sw_flow *flow;
444e6445719SPravin B Shelar 	struct hlist_head *head;
445e6445719SPravin B Shelar 	u32 hash;
446e6445719SPravin B Shelar 	struct sw_flow_key masked_key;
447e6445719SPravin B Shelar 
448ae5f2fb1SJesse Gross 	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
449272c2cf8SJoe Stringer 	hash = flow_hash(&masked_key, &mask->range);
450b637e498SPravin B Shelar 	head = find_bucket(ti, hash);
451*04b7d136STonghao Zhang 	(*n_mask_hit)++;
452*04b7d136STonghao Zhang 
45374ed7ab9SJoe Stringer 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
45474ed7ab9SJoe Stringer 		if (flow->mask == mask && flow->flow_table.hash == hash &&
455272c2cf8SJoe Stringer 		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
456e6445719SPravin B Shelar 			return flow;
457e6445719SPravin B Shelar 	}
458e6445719SPravin B Shelar 	return NULL;
459e6445719SPravin B Shelar }
460e6445719SPravin B Shelar 
461*04b7d136STonghao Zhang static struct sw_flow *flow_lookup(struct flow_table *tbl,
462*04b7d136STonghao Zhang 				   struct table_instance *ti,
4631bd7116fSAndy Zhou 				   const struct sw_flow_key *key,
4641bd7116fSAndy Zhou 				   u32 *n_mask_hit)
465e6445719SPravin B Shelar {
466e6445719SPravin B Shelar 	struct sw_flow_mask *mask;
467b637e498SPravin B Shelar 	struct sw_flow *flow;
468e6445719SPravin B Shelar 
469b637e498SPravin B Shelar 	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
470*04b7d136STonghao Zhang 		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
471e6445719SPravin B Shelar 		if (flow)  /* Found */
472b637e498SPravin B Shelar 			return flow;
473b637e498SPravin B Shelar 	}
474b637e498SPravin B Shelar 	return NULL;
475e6445719SPravin B Shelar }
476e6445719SPravin B Shelar 
477*04b7d136STonghao Zhang /*
478*04b7d136STonghao Zhang  * mask_cache maps a flow to a probable mask. The cache is not tightly
479*04b7d136STonghao Zhang  * coupled to the mask list, so updates to the mask list can leave stale
480*04b7d136STonghao Zhang  * entries in the mask cache.
481*04b7d136STonghao Zhang  * This is a per-CPU cache and is divided into MC_HASH_SEGS segments.
482*04b7d136STonghao Zhang  * In case of a hash collision the entry is hashed into the next segment.
483*04b7d136STonghao Zhang  */
484*04b7d136STonghao Zhang struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
485*04b7d136STonghao Zhang 					  const struct sw_flow_key *key,
486*04b7d136STonghao Zhang 					  u32 skb_hash,
487*04b7d136STonghao Zhang 					  u32 *n_mask_hit)
488*04b7d136STonghao Zhang {
489*04b7d136STonghao Zhang 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
490*04b7d136STonghao Zhang 	struct mask_cache_entry  *entries, *ce, *del;
491*04b7d136STonghao Zhang 	struct sw_flow *flow;
492*04b7d136STonghao Zhang 	u32 hash = skb_hash;
493*04b7d136STonghao Zhang 	int seg;
494*04b7d136STonghao Zhang 
495*04b7d136STonghao Zhang 	*n_mask_hit = 0;
496*04b7d136STonghao Zhang 	if (unlikely(!skb_hash))
497*04b7d136STonghao Zhang 		return flow_lookup(tbl, ti, key, n_mask_hit);
498*04b7d136STonghao Zhang 
499*04b7d136STonghao Zhang 	del = NULL;
500*04b7d136STonghao Zhang 	entries = this_cpu_ptr(tbl->mask_cache);
501*04b7d136STonghao Zhang 
502*04b7d136STonghao Zhang 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
503*04b7d136STonghao Zhang 		int index;
504*04b7d136STonghao Zhang 
505*04b7d136STonghao Zhang 		index = hash & (MC_HASH_ENTRIES - 1);
506*04b7d136STonghao Zhang 		ce = &entries[index];
507*04b7d136STonghao Zhang 
508*04b7d136STonghao Zhang 		if (ce->skb_hash == skb_hash) {
509*04b7d136STonghao Zhang 			struct sw_flow_mask *mask;
510*04b7d136STonghao Zhang 			int i;
511*04b7d136STonghao Zhang 
512*04b7d136STonghao Zhang 			i = 0;
513*04b7d136STonghao Zhang 			list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
514*04b7d136STonghao Zhang 				if (ce->mask_index == i++) {
515*04b7d136STonghao Zhang 					flow = masked_flow_lookup(ti, key, mask,
516*04b7d136STonghao Zhang 								  n_mask_hit);
517*04b7d136STonghao Zhang 					if (flow)  /* Found */
518*04b7d136STonghao Zhang 						return flow;
519*04b7d136STonghao Zhang 
520*04b7d136STonghao Zhang 					break;
521*04b7d136STonghao Zhang 				}
522*04b7d136STonghao Zhang 			}
523*04b7d136STonghao Zhang 
524*04b7d136STonghao Zhang 			del = ce;
525*04b7d136STonghao Zhang 			break;
526*04b7d136STonghao Zhang 		}
527*04b7d136STonghao Zhang 
528*04b7d136STonghao Zhang 		if (!del || (del->skb_hash && !ce->skb_hash)) {
529*04b7d136STonghao Zhang 			del = ce;
530*04b7d136STonghao Zhang 		}
531*04b7d136STonghao Zhang 
532*04b7d136STonghao Zhang 		hash >>= MC_HASH_SHIFT;
533*04b7d136STonghao Zhang 	}
534*04b7d136STonghao Zhang 
535*04b7d136STonghao Zhang 	flow = flow_lookup(tbl, ti, key, n_mask_hit);
536*04b7d136STonghao Zhang 
537*04b7d136STonghao Zhang 	if (flow) {
538*04b7d136STonghao Zhang 		del->skb_hash = skb_hash;
539*04b7d136STonghao Zhang 		del->mask_index = (*n_mask_hit - 1);
540*04b7d136STonghao Zhang 	}
541*04b7d136STonghao Zhang 
542*04b7d136STonghao Zhang 	return flow;
543*04b7d136STonghao Zhang }
544*04b7d136STonghao Zhang 
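On a cache miss the loop above remembers a victim slot, preferring an empty entry over the first slot probed, and only fills it in once the full mask-list lookup succeeds. A tiny sketch of that victim-selection rule in isolation (illustrative only, with a simplified entry type, not part of this file):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_cache_entry {
	uint32_t skb_hash;	/* 0 means the slot is unused */
	uint32_t mask_index;
};

/* Pick the slot to overwrite: the first empty slot seen, otherwise the
 * first slot probed, mirroring the "del" bookkeeping above.
 */
static struct toy_cache_entry *pick_victim(struct toy_cache_entry *slots,
					   size_t n)
{
	struct toy_cache_entry *del = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		struct toy_cache_entry *ce = &slots[i];

		if (!del || (del->skb_hash && !ce->skb_hash))
			del = ce;
	}
	return del;
}

int main(void)
{
	struct toy_cache_entry slots[4] = {
		{ .skb_hash = 0xaaaa }, { .skb_hash = 0 },
		{ .skb_hash = 0xbbbb }, { .skb_hash = 0xcccc },
	};

	/* Picks slots[1], the first unused entry. */
	printf("victim index = %td\n", pick_victim(slots, 4) - slots);
	return 0;
}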
5455bb50632SAndy Zhou struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
5465bb50632SAndy Zhou 				    const struct sw_flow_key *key)
5475bb50632SAndy Zhou {
548*04b7d136STonghao Zhang 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
5495bb50632SAndy Zhou 	u32 __always_unused n_mask_hit;
5505bb50632SAndy Zhou 
551*04b7d136STonghao Zhang 	return flow_lookup(tbl, ti, key, &n_mask_hit);
5525bb50632SAndy Zhou }
5535bb50632SAndy Zhou 
5544a46b24eSAlex Wang struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
55512eb18f7SThomas Graf 					  const struct sw_flow_match *match)
5564a46b24eSAlex Wang {
5574a46b24eSAlex Wang 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
5584a46b24eSAlex Wang 	struct sw_flow_mask *mask;
5594a46b24eSAlex Wang 	struct sw_flow *flow;
560*04b7d136STonghao Zhang 	u32 __always_unused n_mask_hit;
5614a46b24eSAlex Wang 
5624a46b24eSAlex Wang 	/* Always called under ovs-mutex. */
5634a46b24eSAlex Wang 	list_for_each_entry(mask, &tbl->mask_list, list) {
564*04b7d136STonghao Zhang 		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
56574ed7ab9SJoe Stringer 		if (flow && ovs_identifier_is_key(&flow->id) &&
56674ed7ab9SJoe Stringer 		    ovs_flow_cmp_unmasked_key(flow, match))
56774ed7ab9SJoe Stringer 			return flow;
56874ed7ab9SJoe Stringer 	}
56974ed7ab9SJoe Stringer 	return NULL;
57074ed7ab9SJoe Stringer }
57174ed7ab9SJoe Stringer 
57274ed7ab9SJoe Stringer static u32 ufid_hash(const struct sw_flow_id *sfid)
57374ed7ab9SJoe Stringer {
57474ed7ab9SJoe Stringer 	return jhash(sfid->ufid, sfid->ufid_len, 0);
57574ed7ab9SJoe Stringer }
57674ed7ab9SJoe Stringer 
57774ed7ab9SJoe Stringer static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
57874ed7ab9SJoe Stringer 			      const struct sw_flow_id *sfid)
57974ed7ab9SJoe Stringer {
58074ed7ab9SJoe Stringer 	if (flow->id.ufid_len != sfid->ufid_len)
58174ed7ab9SJoe Stringer 		return false;
58274ed7ab9SJoe Stringer 
58374ed7ab9SJoe Stringer 	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
58474ed7ab9SJoe Stringer }
58574ed7ab9SJoe Stringer 
58674ed7ab9SJoe Stringer bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
58774ed7ab9SJoe Stringer {
58874ed7ab9SJoe Stringer 	if (ovs_identifier_is_ufid(&flow->id))
58974ed7ab9SJoe Stringer 		return flow_cmp_masked_key(flow, match->key, &match->range);
59074ed7ab9SJoe Stringer 
59174ed7ab9SJoe Stringer 	return ovs_flow_cmp_unmasked_key(flow, match);
59274ed7ab9SJoe Stringer }
59374ed7ab9SJoe Stringer 
59474ed7ab9SJoe Stringer struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
59574ed7ab9SJoe Stringer 					 const struct sw_flow_id *ufid)
59674ed7ab9SJoe Stringer {
59774ed7ab9SJoe Stringer 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
59874ed7ab9SJoe Stringer 	struct sw_flow *flow;
59974ed7ab9SJoe Stringer 	struct hlist_head *head;
60074ed7ab9SJoe Stringer 	u32 hash;
60174ed7ab9SJoe Stringer 
60274ed7ab9SJoe Stringer 	hash = ufid_hash(ufid);
60374ed7ab9SJoe Stringer 	head = find_bucket(ti, hash);
60474ed7ab9SJoe Stringer 	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
60574ed7ab9SJoe Stringer 		if (flow->ufid_table.hash == hash &&
60674ed7ab9SJoe Stringer 		    ovs_flow_cmp_ufid(flow, ufid))
6074a46b24eSAlex Wang 			return flow;
6084a46b24eSAlex Wang 	}
6094a46b24eSAlex Wang 	return NULL;
6104a46b24eSAlex Wang }
6114a46b24eSAlex Wang 
6121bd7116fSAndy Zhou int ovs_flow_tbl_num_masks(const struct flow_table *table)
6131bd7116fSAndy Zhou {
6141bd7116fSAndy Zhou 	struct sw_flow_mask *mask;
6151bd7116fSAndy Zhou 	int num = 0;
6161bd7116fSAndy Zhou 
6171bd7116fSAndy Zhou 	list_for_each_entry(mask, &table->mask_list, list)
6181bd7116fSAndy Zhou 		num++;
6191bd7116fSAndy Zhou 
6201bd7116fSAndy Zhou 	return num;
6211bd7116fSAndy Zhou }
6221bd7116fSAndy Zhou 
62374ed7ab9SJoe Stringer static struct table_instance *table_instance_expand(struct table_instance *ti,
62474ed7ab9SJoe Stringer 						    bool ufid)
625b637e498SPravin B Shelar {
62674ed7ab9SJoe Stringer 	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
627e6445719SPravin B Shelar }
628e6445719SPravin B Shelar 
62956c19868SJarno Rajahalme /* Remove 'mask' from the mask list, if it is not needed any more. */
63056c19868SJarno Rajahalme static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
63156c19868SJarno Rajahalme {
63256c19868SJarno Rajahalme 	if (mask) {
63356c19868SJarno Rajahalme 		/* ovs-lock is required to protect mask-refcount and
63456c19868SJarno Rajahalme 		 * mask list.
63556c19868SJarno Rajahalme 		 */
63656c19868SJarno Rajahalme 		ASSERT_OVSL();
63756c19868SJarno Rajahalme 		BUG_ON(!mask->ref_count);
63856c19868SJarno Rajahalme 		mask->ref_count--;
63956c19868SJarno Rajahalme 
64056c19868SJarno Rajahalme 		if (!mask->ref_count) {
64156c19868SJarno Rajahalme 			list_del_rcu(&mask->list);
64256c19868SJarno Rajahalme 			kfree_rcu(mask, rcu);
64356c19868SJarno Rajahalme 		}
64456c19868SJarno Rajahalme 	}
64556c19868SJarno Rajahalme }
64656c19868SJarno Rajahalme 
64756c19868SJarno Rajahalme /* Must be called with OVS mutex held. */
648e6445719SPravin B Shelar void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
649e6445719SPravin B Shelar {
650b637e498SPravin B Shelar 	struct table_instance *ti = ovsl_dereference(table->ti);
65174ed7ab9SJoe Stringer 	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
652b637e498SPravin B Shelar 
653e6445719SPravin B Shelar 	BUG_ON(table->count == 0);
65474ed7ab9SJoe Stringer 	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
655e6445719SPravin B Shelar 	table->count--;
65674ed7ab9SJoe Stringer 	if (ovs_identifier_is_ufid(&flow->id)) {
65774ed7ab9SJoe Stringer 		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
65874ed7ab9SJoe Stringer 		table->ufid_count--;
65974ed7ab9SJoe Stringer 	}
66056c19868SJarno Rajahalme 
66156c19868SJarno Rajahalme 	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
66256c19868SJarno Rajahalme 	 * accessible as long as the RCU read lock is held.
66356c19868SJarno Rajahalme 	 */
66456c19868SJarno Rajahalme 	flow_mask_remove(table, flow->mask);
665e6445719SPravin B Shelar }
666e6445719SPravin B Shelar 
667618ed0c8SPravin B Shelar static struct sw_flow_mask *mask_alloc(void)
668e6445719SPravin B Shelar {
669e6445719SPravin B Shelar 	struct sw_flow_mask *mask;
670e6445719SPravin B Shelar 
671e6445719SPravin B Shelar 	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
672e6445719SPravin B Shelar 	if (mask)
673e80857ccSAndy Zhou 		mask->ref_count = 1;
674e6445719SPravin B Shelar 
675e6445719SPravin B Shelar 	return mask;
676e6445719SPravin B Shelar }
677e6445719SPravin B Shelar 
678e6445719SPravin B Shelar static bool mask_equal(const struct sw_flow_mask *a,
679e6445719SPravin B Shelar 		       const struct sw_flow_mask *b)
680e6445719SPravin B Shelar {
6817085130bSDaniele Di Proietto 	const u8 *a_ = (const u8 *)&a->key + a->range.start;
6827085130bSDaniele Di Proietto 	const u8 *b_ = (const u8 *)&b->key + b->range.start;
683e6445719SPravin B Shelar 
684e6445719SPravin B Shelar 	return  (a->range.end == b->range.end)
685e6445719SPravin B Shelar 		&& (a->range.start == b->range.start)
686e6445719SPravin B Shelar 		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
687e6445719SPravin B Shelar }
688e6445719SPravin B Shelar 
689618ed0c8SPravin B Shelar static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
690e6445719SPravin B Shelar 					   const struct sw_flow_mask *mask)
691e6445719SPravin B Shelar {
692e6445719SPravin B Shelar 	struct list_head *ml;
693e6445719SPravin B Shelar 
694b637e498SPravin B Shelar 	list_for_each(ml, &tbl->mask_list) {
695e6445719SPravin B Shelar 		struct sw_flow_mask *m;
696e6445719SPravin B Shelar 		m = container_of(ml, struct sw_flow_mask, list);
697e6445719SPravin B Shelar 		if (mask_equal(mask, m))
698e6445719SPravin B Shelar 			return m;
699e6445719SPravin B Shelar 	}
700e6445719SPravin B Shelar 
701e6445719SPravin B Shelar 	return NULL;
702e6445719SPravin B Shelar }
703e6445719SPravin B Shelar 
704d1211908SBen Pfaff /* Add 'mask' into the mask list, if it is not already there. */
705618ed0c8SPravin B Shelar static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
70612eb18f7SThomas Graf 			    const struct sw_flow_mask *new)
707e6445719SPravin B Shelar {
708618ed0c8SPravin B Shelar 	struct sw_flow_mask *mask;
709618ed0c8SPravin B Shelar 	mask = flow_mask_find(tbl, new);
710618ed0c8SPravin B Shelar 	if (!mask) {
711618ed0c8SPravin B Shelar 		/* Allocate a new mask if none exists. */
712618ed0c8SPravin B Shelar 		mask = mask_alloc();
713618ed0c8SPravin B Shelar 		if (!mask)
714618ed0c8SPravin B Shelar 			return -ENOMEM;
715618ed0c8SPravin B Shelar 		mask->key = new->key;
716618ed0c8SPravin B Shelar 		mask->range = new->range;
717*04b7d136STonghao Zhang 		list_add_tail_rcu(&mask->list, &tbl->mask_list);
718e80857ccSAndy Zhou 	} else {
719e80857ccSAndy Zhou 		BUG_ON(!mask->ref_count);
720e80857ccSAndy Zhou 		mask->ref_count++;
721e6445719SPravin B Shelar 	}
722e6445719SPravin B Shelar 
723618ed0c8SPravin B Shelar 	flow->mask = mask;
724618ed0c8SPravin B Shelar 	return 0;
725618ed0c8SPravin B Shelar }
726618ed0c8SPravin B Shelar 
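flow_mask_find() and the reference count maintained above mean identical masks are shared rather than duplicated: a new flow either bumps ref_count on an existing equal mask or appends a freshly allocated one to tbl->mask_list. Since flow_lookup() walks that list per packet, this deduplication directly bounds the per-packet lookup cost; for example (hypothetical numbers), 10,000 installed flows that all use the same wildcard pattern contribute a single mask with ref_count == 10000, not 10,000 list entries.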
72756c19868SJarno Rajahalme /* Must be called with OVS mutex held. */
728d29ab6f8SJoe Stringer static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
729618ed0c8SPravin B Shelar {
730618ed0c8SPravin B Shelar 	struct table_instance *new_ti = NULL;
731618ed0c8SPravin B Shelar 	struct table_instance *ti;
732618ed0c8SPravin B Shelar 
73374ed7ab9SJoe Stringer 	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
734618ed0c8SPravin B Shelar 	ti = ovsl_dereference(table->ti);
735618ed0c8SPravin B Shelar 	table_instance_insert(ti, flow);
736618ed0c8SPravin B Shelar 	table->count++;
737618ed0c8SPravin B Shelar 
738618ed0c8SPravin B Shelar 	/* Expand table, if necessary, to make room. */
739618ed0c8SPravin B Shelar 	if (table->count > ti->n_buckets)
74074ed7ab9SJoe Stringer 		new_ti = table_instance_expand(ti, false);
741618ed0c8SPravin B Shelar 	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
74274ed7ab9SJoe Stringer 		new_ti = table_instance_rehash(ti, ti->n_buckets, false);
743618ed0c8SPravin B Shelar 
744618ed0c8SPravin B Shelar 	if (new_ti) {
745618ed0c8SPravin B Shelar 		rcu_assign_pointer(table->ti, new_ti);
74674ed7ab9SJoe Stringer 		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
747618ed0c8SPravin B Shelar 		table->last_rehash = jiffies;
748618ed0c8SPravin B Shelar 	}
749d29ab6f8SJoe Stringer }
750d29ab6f8SJoe Stringer 
751d29ab6f8SJoe Stringer /* Must be called with OVS mutex held. */
75274ed7ab9SJoe Stringer static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
75374ed7ab9SJoe Stringer {
75474ed7ab9SJoe Stringer 	struct table_instance *ti;
75574ed7ab9SJoe Stringer 
75674ed7ab9SJoe Stringer 	flow->ufid_table.hash = ufid_hash(&flow->id);
75774ed7ab9SJoe Stringer 	ti = ovsl_dereference(table->ufid_ti);
75874ed7ab9SJoe Stringer 	ufid_table_instance_insert(ti, flow);
75974ed7ab9SJoe Stringer 	table->ufid_count++;
76074ed7ab9SJoe Stringer 
76174ed7ab9SJoe Stringer 	/* Expand table, if necessary, to make room. */
76274ed7ab9SJoe Stringer 	if (table->ufid_count > ti->n_buckets) {
76374ed7ab9SJoe Stringer 		struct table_instance *new_ti;
76474ed7ab9SJoe Stringer 
76574ed7ab9SJoe Stringer 		new_ti = table_instance_expand(ti, true);
76674ed7ab9SJoe Stringer 		if (new_ti) {
76774ed7ab9SJoe Stringer 			rcu_assign_pointer(table->ufid_ti, new_ti);
76874ed7ab9SJoe Stringer 			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
76974ed7ab9SJoe Stringer 		}
77074ed7ab9SJoe Stringer 	}
77174ed7ab9SJoe Stringer }
77274ed7ab9SJoe Stringer 
77374ed7ab9SJoe Stringer /* Must be called with OVS mutex held. */
774d29ab6f8SJoe Stringer int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
775d29ab6f8SJoe Stringer 			const struct sw_flow_mask *mask)
776d29ab6f8SJoe Stringer {
777d29ab6f8SJoe Stringer 	int err;
778d29ab6f8SJoe Stringer 
779d29ab6f8SJoe Stringer 	err = flow_mask_insert(table, flow, mask);
780d29ab6f8SJoe Stringer 	if (err)
781d29ab6f8SJoe Stringer 		return err;
782d29ab6f8SJoe Stringer 	flow_key_insert(table, flow);
78374ed7ab9SJoe Stringer 	if (ovs_identifier_is_ufid(&flow->id))
78474ed7ab9SJoe Stringer 		flow_ufid_insert(table, flow);
785d29ab6f8SJoe Stringer 
786618ed0c8SPravin B Shelar 	return 0;
787618ed0c8SPravin B Shelar }
788618ed0c8SPravin B Shelar 
789e6445719SPravin B Shelar /* Initializes the flow module.
790e6445719SPravin B Shelar  * Returns zero if successful or a negative error code. */
791e6445719SPravin B Shelar int ovs_flow_init(void)
792e6445719SPravin B Shelar {
793e6445719SPravin B Shelar 	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
794e6445719SPravin B Shelar 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
795e6445719SPravin B Shelar 
79663e7959cSJarno Rajahalme 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
797db74a333SThadeu Lima de Souza Cascardo 				       + (nr_cpu_ids
798aef833c5SPablo Neira Ayuso 					  * sizeof(struct sw_flow_stats *)),
79963e7959cSJarno Rajahalme 				       0, 0, NULL);
800e6445719SPravin B Shelar 	if (flow_cache == NULL)
801e6445719SPravin B Shelar 		return -ENOMEM;
802e6445719SPravin B Shelar 
80363e7959cSJarno Rajahalme 	flow_stats_cache
804aef833c5SPablo Neira Ayuso 		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
80563e7959cSJarno Rajahalme 				    0, SLAB_HWCACHE_ALIGN, NULL);
80663e7959cSJarno Rajahalme 	if (flow_stats_cache == NULL) {
80763e7959cSJarno Rajahalme 		kmem_cache_destroy(flow_cache);
80863e7959cSJarno Rajahalme 		flow_cache = NULL;
80963e7959cSJarno Rajahalme 		return -ENOMEM;
81063e7959cSJarno Rajahalme 	}
81163e7959cSJarno Rajahalme 
812e6445719SPravin B Shelar 	return 0;
813e6445719SPravin B Shelar }
814e6445719SPravin B Shelar 
815e6445719SPravin B Shelar /* Uninitializes the flow module. */
816e6445719SPravin B Shelar void ovs_flow_exit(void)
817e6445719SPravin B Shelar {
81863e7959cSJarno Rajahalme 	kmem_cache_destroy(flow_stats_cache);
819e6445719SPravin B Shelar 	kmem_cache_destroy(flow_cache);
820e6445719SPravin B Shelar }
821