xref: /openbmc/linux/net/openvswitch/datapath.c (revision d4fd6347)
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53 
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "meter.h"
59 #include "vport-internal_dev.h"
60 #include "vport-netdev.h"
61 
62 unsigned int ovs_net_id __read_mostly;
63 
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67 
68 static const struct nla_policy flow_policy[];
69 
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71 	.name = OVS_FLOW_MCGROUP,
72 };
73 
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75 	.name = OVS_DATAPATH_MCGROUP,
76 };
77 
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79 	.name = OVS_VPORT_MCGROUP,
80 };
81 
82 /* Check whether we need to build a reply message.
83  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85 			    unsigned int group)
86 {
87 	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88 	       genl_has_listeners(family, genl_info_net(info), group);
89 }
90 
91 static void ovs_notify(struct genl_family *family,
92 		       struct sk_buff *skb, struct genl_info *info)
93 {
94 	genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96 
97 /**
98  * DOC: Locking:
99  *
100  * All writes, e.g. writes to device state (add/remove datapath, port, set
101  * operations on vports, etc.) and writes to other state (flow table
102  * modifications, setting miscellaneous datapath parameters, etc.), are
103  * protected by ovs_lock.
104  *
105  * Reads are protected by RCU.
106  *
107  * There are a few special cases (mostly stats) that have their own
108  * synchronization but nest under all of the above and don't interact with
109  * each other.
110  *
111  * The RTNL lock nests inside ovs_mutex.
112  */
113 
114 static DEFINE_MUTEX(ovs_mutex);
115 
116 void ovs_lock(void)
117 {
118 	mutex_lock(&ovs_mutex);
119 }
120 
121 void ovs_unlock(void)
122 {
123 	mutex_unlock(&ovs_mutex);
124 }
125 
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129 	if (debug_locks)
130 		return lockdep_is_held(&ovs_mutex);
131 	else
132 		return 1;
133 }
134 #endif
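
/*
 * Editorial sketch (not part of the original file): how the locking rules
 * documented above are typically applied.  A write-side operation takes
 * ovs_lock() and may nest rtnl_lock() inside it, while readers only need
 * RCU:
 *
 *	ovs_lock();
 *	rtnl_lock();
 *	...modify datapath, vport or flow-table state...
 *	rtnl_unlock();
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	...lookups via get_dp_rcu(), ovs_vport_rcu(), etc....
 *	rcu_read_unlock();
 */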
135 
136 static struct vport *new_vport(const struct vport_parms *);
137 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
138 			     const struct sw_flow_key *,
139 			     const struct dp_upcall_info *,
140 			     uint32_t cutlen);
141 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142 				  const struct sw_flow_key *,
143 				  const struct dp_upcall_info *,
144 				  uint32_t cutlen);
145 
146 /* Must be called with rcu_read_lock or ovs_mutex. */
147 const char *ovs_dp_name(const struct datapath *dp)
148 {
149 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
150 	return ovs_vport_name(vport);
151 }
152 
153 static int get_dpifindex(const struct datapath *dp)
154 {
155 	struct vport *local;
156 	int ifindex;
157 
158 	rcu_read_lock();
159 
160 	local = ovs_vport_rcu(dp, OVSP_LOCAL);
161 	if (local)
162 		ifindex = local->dev->ifindex;
163 	else
164 		ifindex = 0;
165 
166 	rcu_read_unlock();
167 
168 	return ifindex;
169 }
170 
171 static void destroy_dp_rcu(struct rcu_head *rcu)
172 {
173 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
174 
175 	ovs_flow_tbl_destroy(&dp->table);
176 	free_percpu(dp->stats_percpu);
177 	kfree(dp->ports);
178 	ovs_meters_exit(dp);
179 	kfree(dp);
180 }
181 
182 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
183 					    u16 port_no)
184 {
185 	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
186 }
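
/*
 * Worked example (editorial note): vport_hash_bucket() relies on
 * DP_VPORT_HASH_BUCKETS being a power of two (1024 in datapath.h at the
 * time of writing), so the AND acts as a cheap modulo:
 *
 *	port_no = 1030  ->  1030 & (1024 - 1) = 6  ->  &dp->ports[6]
 */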
187 
188 /* Called with ovs_mutex or RCU read lock. */
189 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
190 {
191 	struct vport *vport;
192 	struct hlist_head *head;
193 
194 	head = vport_hash_bucket(dp, port_no);
195 	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
196 		if (vport->port_no == port_no)
197 			return vport;
198 	}
199 	return NULL;
200 }
201 
202 /* Called with ovs_mutex. */
203 static struct vport *new_vport(const struct vport_parms *parms)
204 {
205 	struct vport *vport;
206 
207 	vport = ovs_vport_add(parms);
208 	if (!IS_ERR(vport)) {
209 		struct datapath *dp = parms->dp;
210 		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
211 
212 		hlist_add_head_rcu(&vport->dp_hash_node, head);
213 	}
214 	return vport;
215 }
216 
217 void ovs_dp_detach_port(struct vport *p)
218 {
219 	ASSERT_OVSL();
220 
221 	/* First drop references to device. */
222 	hlist_del_rcu(&p->dp_hash_node);
223 
224 	/* Then destroy it. */
225 	ovs_vport_del(p);
226 }
227 
228 /* Must be called with rcu_read_lock. */
229 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
230 {
231 	const struct vport *p = OVS_CB(skb)->input_vport;
232 	struct datapath *dp = p->dp;
233 	struct sw_flow *flow;
234 	struct sw_flow_actions *sf_acts;
235 	struct dp_stats_percpu *stats;
236 	u64 *stats_counter;
237 	u32 n_mask_hit;
238 
239 	stats = this_cpu_ptr(dp->stats_percpu);
240 
241 	/* Look up flow. */
242 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
243 	if (unlikely(!flow)) {
244 		struct dp_upcall_info upcall;
245 		int error;
246 
247 		memset(&upcall, 0, sizeof(upcall));
248 		upcall.cmd = OVS_PACKET_CMD_MISS;
249 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
250 		upcall.mru = OVS_CB(skb)->mru;
251 		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
252 		if (unlikely(error))
253 			kfree_skb(skb);
254 		else
255 			consume_skb(skb);
256 		stats_counter = &stats->n_missed;
257 		goto out;
258 	}
259 
260 	ovs_flow_stats_update(flow, key->tp.flags, skb);
261 	sf_acts = rcu_dereference(flow->sf_acts);
262 	ovs_execute_actions(dp, skb, sf_acts, key);
263 
264 	stats_counter = &stats->n_hit;
265 
266 out:
267 	/* Update datapath statistics. */
268 	u64_stats_update_begin(&stats->syncp);
269 	(*stats_counter)++;
270 	stats->n_mask_hit += n_mask_hit;
271 	u64_stats_update_end(&stats->syncp);
272 }
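
/*
 * Editorial note: every packet processed above lands in exactly one
 * datapath counter: n_hit (flow found, actions executed), n_missed (no
 * flow, miss upcall queued to userspace) or, via the error path of
 * ovs_dp_upcall(), n_lost.
 */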
273 
274 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
275 		  const struct sw_flow_key *key,
276 		  const struct dp_upcall_info *upcall_info,
277 		  uint32_t cutlen)
278 {
279 	struct dp_stats_percpu *stats;
280 	int err;
281 
282 	if (upcall_info->portid == 0) {
283 		err = -ENOTCONN;
284 		goto err;
285 	}
286 
287 	if (!skb_is_gso(skb))
288 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
289 	else
290 		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
291 	if (err)
292 		goto err;
293 
294 	return 0;
295 
296 err:
297 	stats = this_cpu_ptr(dp->stats_percpu);
298 
299 	u64_stats_update_begin(&stats->syncp);
300 	stats->n_lost++;
301 	u64_stats_update_end(&stats->syncp);
302 
303 	return err;
304 }
305 
306 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
307 			     const struct sw_flow_key *key,
308 			     const struct dp_upcall_info *upcall_info,
309 			     uint32_t cutlen)
310 {
311 	unsigned int gso_type = skb_shinfo(skb)->gso_type;
312 	struct sw_flow_key later_key;
313 	struct sk_buff *segs, *nskb;
314 	int err;
315 
316 	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
317 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
318 	if (IS_ERR(segs))
319 		return PTR_ERR(segs);
320 	if (segs == NULL)
321 		return -EINVAL;
322 
323 	if (gso_type & SKB_GSO_UDP) {
324 		/* The initial flow key extracted by ovs_flow_key_extract()
325 		 * in this case is for the first fragment, so we need to
326 		 * properly mark later fragments.
327 		 */
328 		later_key = *key;
329 		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
330 	}
331 
332 	/* Queue all of the segments. */
333 	skb = segs;
334 	do {
335 		if (gso_type & SKB_GSO_UDP && skb != segs)
336 			key = &later_key;
337 
338 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
339 		if (err)
340 			break;
341 
342 	} while ((skb = skb->next));
343 
344 	/* Free all of the segments. */
345 	skb = segs;
346 	do {
347 		nskb = skb->next;
348 		if (err)
349 			kfree_skb(skb);
350 		else
351 			consume_skb(skb);
352 	} while ((skb = nskb));
353 	return err;
354 }
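
/*
 * Editorial note: for a UDP GSO packet the key extracted from the head skb
 * describes the first IP fragment, so after segmentation userspace sees:
 *
 *	segment 0:     key->ip.frag == OVS_FRAG_TYPE_FIRST
 *	segments 1..n: key->ip.frag == OVS_FRAG_TYPE_LATER  (via later_key)
 */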
355 
356 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
357 			      unsigned int hdrlen, int actions_attrlen)
358 {
359 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
360 		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
361 		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
362 		+ nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
363 
364 	/* OVS_PACKET_ATTR_USERDATA */
365 	if (upcall_info->userdata)
366 		size += NLA_ALIGN(upcall_info->userdata->nla_len);
367 
368 	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
369 	if (upcall_info->egress_tun_info)
370 		size += nla_total_size(ovs_tun_key_attr_size());
371 
372 	/* OVS_PACKET_ATTR_ACTIONS */
373 	if (upcall_info->actions_len)
374 		size += nla_total_size(actions_attrlen);
375 
376 	/* OVS_PACKET_ATTR_MRU */
377 	if (upcall_info->mru)
378 		size += nla_total_size(sizeof(upcall_info->mru));
379 
380 	return size;
381 }
382 
383 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
384 {
385 	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
386 		size_t plen = NLA_ALIGN(skb->len) - skb->len;
387 
388 		if (plen > 0)
389 			skb_put_zero(skb, plen);
390 	}
391 }
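
/*
 * Worked example (editorial note): NLA_ALIGN() rounds up to a multiple of
 * NLA_ALIGNTO (4 bytes).  With skb->len == 14, NLA_ALIGN(14) == 16, so
 * plen == 2 and two zero bytes are appended; if skb->len is already a
 * multiple of 4, plen == 0 and nothing is written.
 */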
392 
393 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
394 				  const struct sw_flow_key *key,
395 				  const struct dp_upcall_info *upcall_info,
396 				  uint32_t cutlen)
397 {
398 	struct ovs_header *upcall;
399 	struct sk_buff *nskb = NULL;
400 	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
401 	struct nlattr *nla;
402 	size_t len;
403 	unsigned int hlen;
404 	int err, dp_ifindex;
405 
406 	dp_ifindex = get_dpifindex(dp);
407 	if (!dp_ifindex)
408 		return -ENODEV;
409 
410 	if (skb_vlan_tag_present(skb)) {
411 		nskb = skb_clone(skb, GFP_ATOMIC);
412 		if (!nskb)
413 			return -ENOMEM;
414 
415 		nskb = __vlan_hwaccel_push_inside(nskb);
416 		if (!nskb)
417 			return -ENOMEM;
418 
419 		skb = nskb;
420 	}
421 
422 	if (nla_attr_size(skb->len) > USHRT_MAX) {
423 		err = -EFBIG;
424 		goto out;
425 	}
426 
427 	/* Complete checksum if needed */
428 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
429 	    (err = skb_csum_hwoffload_help(skb, 0)))
430 		goto out;
431 
432 	/* Older versions of OVS user space enforce alignment of the last
433 	 * Netlink attribute to NLA_ALIGNTO which would require extensive
434 	 * padding logic. Only perform zerocopy if padding is not required.
435 	 */
436 	if (dp->user_features & OVS_DP_F_UNALIGNED)
437 		hlen = skb_zerocopy_headlen(skb);
438 	else
439 		hlen = skb->len;
440 
441 	len = upcall_msg_size(upcall_info, hlen - cutlen,
442 			      OVS_CB(skb)->acts_origlen);
443 	user_skb = genlmsg_new(len, GFP_ATOMIC);
444 	if (!user_skb) {
445 		err = -ENOMEM;
446 		goto out;
447 	}
448 
449 	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
450 			     0, upcall_info->cmd);
451 	if (!upcall) {
452 		err = -EINVAL;
453 		goto out;
454 	}
455 	upcall->dp_ifindex = dp_ifindex;
456 
457 	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
458 	if (err)
459 		goto out;
460 
461 	if (upcall_info->userdata)
462 		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
463 			  nla_len(upcall_info->userdata),
464 			  nla_data(upcall_info->userdata));
465 
466 	if (upcall_info->egress_tun_info) {
467 		nla = nla_nest_start_noflag(user_skb,
468 					    OVS_PACKET_ATTR_EGRESS_TUN_KEY);
469 		if (!nla) {
470 			err = -EMSGSIZE;
471 			goto out;
472 		}
473 		err = ovs_nla_put_tunnel_info(user_skb,
474 					      upcall_info->egress_tun_info);
475 		if (err)
476 			goto out;
477 
478 		nla_nest_end(user_skb, nla);
479 	}
480 
481 	if (upcall_info->actions_len) {
482 		nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
483 		if (!nla) {
484 			err = -EMSGSIZE;
485 			goto out;
486 		}
487 		err = ovs_nla_put_actions(upcall_info->actions,
488 					  upcall_info->actions_len,
489 					  user_skb);
490 		if (!err)
491 			nla_nest_end(user_skb, nla);
492 		else
493 			nla_nest_cancel(user_skb, nla);
494 	}
495 
496 	/* Add OVS_PACKET_ATTR_MRU */
497 	if (upcall_info->mru) {
498 		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
499 				upcall_info->mru)) {
500 			err = -ENOBUFS;
501 			goto out;
502 		}
503 		pad_packet(dp, user_skb);
504 	}
505 
506 	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
507 	if (cutlen > 0) {
508 		if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
509 				skb->len)) {
510 			err = -ENOBUFS;
511 			goto out;
512 		}
513 		pad_packet(dp, user_skb);
514 	}
515 
516 	/* Only reserve room for the attribute header; packet data is added
517 	 * in skb_zerocopy(). */
518 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
519 		err = -ENOBUFS;
520 		goto out;
521 	}
522 	nla->nla_len = nla_attr_size(skb->len - cutlen);
523 
524 	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
525 	if (err)
526 		goto out;
527 
528 	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
529 	pad_packet(dp, user_skb);
530 
531 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
532 
533 	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
534 	user_skb = NULL;
535 out:
536 	if (err)
537 		skb_tx_error(skb);
538 	kfree_skb(user_skb);
539 	kfree_skb(nskb);
540 	return err;
541 }
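
/*
 * Editorial sketch of the upcall message built above (attribute order as
 * emitted; bracketed attributes are optional):
 *
 *	struct ovs_header                dp_ifindex
 *	OVS_PACKET_ATTR_KEY              flow key extracted from the packet
 *	[OVS_PACKET_ATTR_USERDATA]
 *	[OVS_PACKET_ATTR_EGRESS_TUN_KEY]
 *	[OVS_PACKET_ATTR_ACTIONS]
 *	[OVS_PACKET_ATTR_MRU]
 *	[OVS_PACKET_ATTR_LEN]            original length, when truncated
 *	OVS_PACKET_ATTR_PACKET           packet data, zerocopied when possible
 */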
542 
543 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
544 {
545 	struct ovs_header *ovs_header = info->userhdr;
546 	struct net *net = sock_net(skb->sk);
547 	struct nlattr **a = info->attrs;
548 	struct sw_flow_actions *acts;
549 	struct sk_buff *packet;
550 	struct sw_flow *flow;
551 	struct sw_flow_actions *sf_acts;
552 	struct datapath *dp;
553 	struct vport *input_vport;
554 	u16 mru = 0;
555 	int len;
556 	int err;
557 	bool log = !a[OVS_PACKET_ATTR_PROBE];
558 
559 	err = -EINVAL;
560 	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
561 	    !a[OVS_PACKET_ATTR_ACTIONS])
562 		goto err;
563 
564 	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
565 	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
566 	err = -ENOMEM;
567 	if (!packet)
568 		goto err;
569 	skb_reserve(packet, NET_IP_ALIGN);
570 
571 	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
572 
573 	/* Set packet's mru */
574 	if (a[OVS_PACKET_ATTR_MRU]) {
575 		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
576 		packet->ignore_df = 1;
577 	}
578 	OVS_CB(packet)->mru = mru;
579 
580 	/* Build an sw_flow for sending this packet. */
581 	flow = ovs_flow_alloc();
582 	err = PTR_ERR(flow);
583 	if (IS_ERR(flow))
584 		goto err_kfree_skb;
585 
586 	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
587 					     packet, &flow->key, log);
588 	if (err)
589 		goto err_flow_free;
590 
591 	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
592 				   &flow->key, &acts, log);
593 	if (err)
594 		goto err_flow_free;
595 
596 	rcu_assign_pointer(flow->sf_acts, acts);
597 	packet->priority = flow->key.phy.priority;
598 	packet->mark = flow->key.phy.skb_mark;
599 
600 	rcu_read_lock();
601 	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
602 	err = -ENODEV;
603 	if (!dp)
604 		goto err_unlock;
605 
606 	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
607 	if (!input_vport)
608 		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
609 
610 	if (!input_vport)
611 		goto err_unlock;
612 
613 	packet->dev = input_vport->dev;
614 	OVS_CB(packet)->input_vport = input_vport;
615 	sf_acts = rcu_dereference(flow->sf_acts);
616 
617 	local_bh_disable();
618 	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
619 	local_bh_enable();
620 	rcu_read_unlock();
621 
622 	ovs_flow_free(flow, false);
623 	return err;
624 
625 err_unlock:
626 	rcu_read_unlock();
627 err_flow_free:
628 	ovs_flow_free(flow, false);
629 err_kfree_skb:
630 	kfree_skb(packet);
631 err:
632 	return err;
633 }
634 
635 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
636 	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
637 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
638 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
639 	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
640 	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
641 };
642 
643 static const struct genl_ops dp_packet_genl_ops[] = {
644 	{ .cmd = OVS_PACKET_CMD_EXECUTE,
645 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
646 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
647 	  .doit = ovs_packet_cmd_execute
648 	}
649 };
650 
651 static struct genl_family dp_packet_genl_family __ro_after_init = {
652 	.hdrsize = sizeof(struct ovs_header),
653 	.name = OVS_PACKET_FAMILY,
654 	.version = OVS_PACKET_VERSION,
655 	.maxattr = OVS_PACKET_ATTR_MAX,
656 	.policy = packet_policy,
657 	.netnsok = true,
658 	.parallel_ops = true,
659 	.ops = dp_packet_genl_ops,
660 	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
661 	.module = THIS_MODULE,
662 };
663 
664 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
665 			 struct ovs_dp_megaflow_stats *mega_stats)
666 {
667 	int i;
668 
669 	memset(mega_stats, 0, sizeof(*mega_stats));
670 
671 	stats->n_flows = ovs_flow_tbl_count(&dp->table);
672 	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
673 
674 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
675 
676 	for_each_possible_cpu(i) {
677 		const struct dp_stats_percpu *percpu_stats;
678 		struct dp_stats_percpu local_stats;
679 		unsigned int start;
680 
681 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
682 
683 		do {
684 			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
685 			local_stats = *percpu_stats;
686 		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
687 
688 		stats->n_hit += local_stats.n_hit;
689 		stats->n_missed += local_stats.n_missed;
690 		stats->n_lost += local_stats.n_lost;
691 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
692 	}
693 }
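
/*
 * Editorial note: the u64_stats_fetch_begin_irq()/retry loop above is the
 * usual seqcount snapshot pattern.  A CPU's counters are re-copied
 * whenever a writer (e.g. ovs_dp_process_packet()) raced with the read,
 * which keeps the 64-bit counters tear-free on 32-bit machines without
 * adding a lock to the packet fast path.
 */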
694 
695 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
696 {
697 	return ovs_identifier_is_ufid(sfid) &&
698 	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
699 }
700 
701 static bool should_fill_mask(uint32_t ufid_flags)
702 {
703 	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
704 }
705 
706 static bool should_fill_actions(uint32_t ufid_flags)
707 {
708 	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
709 }
710 
711 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
712 				    const struct sw_flow_id *sfid,
713 				    uint32_t ufid_flags)
714 {
715 	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
716 
717 	/* OVS_FLOW_ATTR_UFID */
718 	if (sfid && ovs_identifier_is_ufid(sfid))
719 		len += nla_total_size(sfid->ufid_len);
720 
721 	/* OVS_FLOW_ATTR_KEY */
722 	if (!sfid || should_fill_key(sfid, ufid_flags))
723 		len += nla_total_size(ovs_key_attr_size());
724 
725 	/* OVS_FLOW_ATTR_MASK */
726 	if (should_fill_mask(ufid_flags))
727 		len += nla_total_size(ovs_key_attr_size());
728 
729 	/* OVS_FLOW_ATTR_ACTIONS */
730 	if (should_fill_actions(ufid_flags))
731 		len += nla_total_size(acts->orig_len);
732 
733 	return len
734 		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
735 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
736 		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
737 }
738 
739 /* Called with ovs_mutex or RCU read lock. */
740 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
741 				   struct sk_buff *skb)
742 {
743 	struct ovs_flow_stats stats;
744 	__be16 tcp_flags;
745 	unsigned long used;
746 
747 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
748 
749 	if (used &&
750 	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
751 			      OVS_FLOW_ATTR_PAD))
752 		return -EMSGSIZE;
753 
754 	if (stats.n_packets &&
755 	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
756 			  sizeof(struct ovs_flow_stats), &stats,
757 			  OVS_FLOW_ATTR_PAD))
758 		return -EMSGSIZE;
759 
760 	if ((u8)ntohs(tcp_flags) &&
761 	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
762 		return -EMSGSIZE;
763 
764 	return 0;
765 }
766 
767 /* Called with ovs_mutex or RCU read lock. */
768 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
769 				     struct sk_buff *skb, int skb_orig_len)
770 {
771 	struct nlattr *start;
772 	int err;
773 
774 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
775 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
776 	 * Netlink but individual action lists can be longer than
777 	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
778 	 * The userspace caller can always fetch the actions separately if it
779 	 * really wants them.  (Most userspace callers in fact don't care.)
780 	 *
781 	 * This can only fail for dump operations because the skb is always
782 	 * properly sized for single flows.
783 	 */
784 	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
785 	if (start) {
786 		const struct sw_flow_actions *sf_acts;
787 
788 		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
789 		err = ovs_nla_put_actions(sf_acts->actions,
790 					  sf_acts->actions_len, skb);
791 
792 		if (!err)
793 			nla_nest_end(skb, start);
794 		else {
795 			if (skb_orig_len)
796 				return err;
797 
798 			nla_nest_cancel(skb, start);
799 		}
800 	} else if (skb_orig_len) {
801 		return -EMSGSIZE;
802 	}
803 
804 	return 0;
805 }
806 
807 /* Called with ovs_mutex or RCU read lock. */
808 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
809 				  struct sk_buff *skb, u32 portid,
810 				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
811 {
812 	const int skb_orig_len = skb->len;
813 	struct ovs_header *ovs_header;
814 	int err;
815 
816 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
817 				 flags, cmd);
818 	if (!ovs_header)
819 		return -EMSGSIZE;
820 
821 	ovs_header->dp_ifindex = dp_ifindex;
822 
823 	err = ovs_nla_put_identifier(flow, skb);
824 	if (err)
825 		goto error;
826 
827 	if (should_fill_key(&flow->id, ufid_flags)) {
828 		err = ovs_nla_put_masked_key(flow, skb);
829 		if (err)
830 			goto error;
831 	}
832 
833 	if (should_fill_mask(ufid_flags)) {
834 		err = ovs_nla_put_mask(flow, skb);
835 		if (err)
836 			goto error;
837 	}
838 
839 	err = ovs_flow_cmd_fill_stats(flow, skb);
840 	if (err)
841 		goto error;
842 
843 	if (should_fill_actions(ufid_flags)) {
844 		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
845 		if (err)
846 			goto error;
847 	}
848 
849 	genlmsg_end(skb, ovs_header);
850 	return 0;
851 
852 error:
853 	genlmsg_cancel(skb, ovs_header);
854 	return err;
855 }
856 
857 /* May not be called with RCU read lock. */
858 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
859 					       const struct sw_flow_id *sfid,
860 					       struct genl_info *info,
861 					       bool always,
862 					       uint32_t ufid_flags)
863 {
864 	struct sk_buff *skb;
865 	size_t len;
866 
867 	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
868 		return NULL;
869 
870 	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
871 	skb = genlmsg_new(len, GFP_KERNEL);
872 	if (!skb)
873 		return ERR_PTR(-ENOMEM);
874 
875 	return skb;
876 }
877 
878 /* Called with ovs_mutex. */
879 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
880 					       int dp_ifindex,
881 					       struct genl_info *info, u8 cmd,
882 					       bool always, u32 ufid_flags)
883 {
884 	struct sk_buff *skb;
885 	int retval;
886 
887 	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
888 				      &flow->id, info, always, ufid_flags);
889 	if (IS_ERR_OR_NULL(skb))
890 		return skb;
891 
892 	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
893 					info->snd_portid, info->snd_seq, 0,
894 					cmd, ufid_flags);
895 	BUG_ON(retval < 0);
896 	return skb;
897 }
898 
899 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
900 {
901 	struct net *net = sock_net(skb->sk);
902 	struct nlattr **a = info->attrs;
903 	struct ovs_header *ovs_header = info->userhdr;
904 	struct sw_flow *flow = NULL, *new_flow;
905 	struct sw_flow_mask mask;
906 	struct sk_buff *reply;
907 	struct datapath *dp;
908 	struct sw_flow_actions *acts;
909 	struct sw_flow_match match;
910 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
911 	int error;
912 	bool log = !a[OVS_FLOW_ATTR_PROBE];
913 
914 	/* Must have key and actions. */
915 	error = -EINVAL;
916 	if (!a[OVS_FLOW_ATTR_KEY]) {
917 		OVS_NLERR(log, "Flow key attr not present in new flow.");
918 		goto error;
919 	}
920 	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
921 		OVS_NLERR(log, "Flow actions attr not present in new flow.");
922 		goto error;
923 	}
924 
925 	/* Most of the time we need to allocate a new flow, so do it before
926 	 * taking the lock.
927 	 */
928 	new_flow = ovs_flow_alloc();
929 	if (IS_ERR(new_flow)) {
930 		error = PTR_ERR(new_flow);
931 		goto error;
932 	}
933 
934 	/* Extract key. */
935 	ovs_match_init(&match, &new_flow->key, false, &mask);
936 	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
937 				  a[OVS_FLOW_ATTR_MASK], log);
938 	if (error)
939 		goto err_kfree_flow;
940 
941 	/* Extract flow identifier. */
942 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
943 				       &new_flow->key, log);
944 	if (error)
945 		goto err_kfree_flow;
946 
947 	/* The unmasked key is needed for matching when a UFID is not used. */
948 	if (ovs_identifier_is_key(&new_flow->id))
949 		match.key = new_flow->id.unmasked_key;
950 
951 	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
952 
953 	/* Validate actions. */
954 	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
955 				     &new_flow->key, &acts, log);
956 	if (error) {
957 		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
958 		goto err_kfree_flow;
959 	}
960 
961 	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
962 					ufid_flags);
963 	if (IS_ERR(reply)) {
964 		error = PTR_ERR(reply);
965 		goto err_kfree_acts;
966 	}
967 
968 	ovs_lock();
969 	dp = get_dp(net, ovs_header->dp_ifindex);
970 	if (unlikely(!dp)) {
971 		error = -ENODEV;
972 		goto err_unlock_ovs;
973 	}
974 
975 	/* Check if this is a duplicate flow */
976 	if (ovs_identifier_is_ufid(&new_flow->id))
977 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
978 	if (!flow)
979 		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
980 	if (likely(!flow)) {
981 		rcu_assign_pointer(new_flow->sf_acts, acts);
982 
983 		/* Put flow in bucket. */
984 		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
985 		if (unlikely(error)) {
986 			acts = NULL;
987 			goto err_unlock_ovs;
988 		}
989 
990 		if (unlikely(reply)) {
991 			error = ovs_flow_cmd_fill_info(new_flow,
992 						       ovs_header->dp_ifindex,
993 						       reply, info->snd_portid,
994 						       info->snd_seq, 0,
995 						       OVS_FLOW_CMD_NEW,
996 						       ufid_flags);
997 			BUG_ON(error < 0);
998 		}
999 		ovs_unlock();
1000 	} else {
1001 		struct sw_flow_actions *old_acts;
1002 
1003 		/* Bail out if we're not allowed to modify an existing flow.
1004 		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1005 		 * because Generic Netlink treats the latter as a dump
1006 		 * request.  We also accept NLM_F_EXCL in case that bug ever
1007 		 * gets fixed.
1008 		 */
1009 		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1010 							 | NLM_F_EXCL))) {
1011 			error = -EEXIST;
1012 			goto err_unlock_ovs;
1013 		}
1014 		/* The flow identifier has to be the same for flow updates.
1015 		 * Look for any overlapping flow.
1016 		 */
1017 		if (unlikely(!ovs_flow_cmp(flow, &match))) {
1018 			if (ovs_identifier_is_key(&flow->id))
1019 				flow = ovs_flow_tbl_lookup_exact(&dp->table,
1020 								 &match);
1021 			else /* UFID matches but key is different */
1022 				flow = NULL;
1023 			if (!flow) {
1024 				error = -ENOENT;
1025 				goto err_unlock_ovs;
1026 			}
1027 		}
1028 		/* Update actions. */
1029 		old_acts = ovsl_dereference(flow->sf_acts);
1030 		rcu_assign_pointer(flow->sf_acts, acts);
1031 
1032 		if (unlikely(reply)) {
1033 			error = ovs_flow_cmd_fill_info(flow,
1034 						       ovs_header->dp_ifindex,
1035 						       reply, info->snd_portid,
1036 						       info->snd_seq, 0,
1037 						       OVS_FLOW_CMD_NEW,
1038 						       ufid_flags);
1039 			BUG_ON(error < 0);
1040 		}
1041 		ovs_unlock();
1042 
1043 		ovs_nla_free_flow_actions_rcu(old_acts);
1044 		ovs_flow_free(new_flow, false);
1045 	}
1046 
1047 	if (reply)
1048 		ovs_notify(&dp_flow_genl_family, reply, info);
1049 	return 0;
1050 
1051 err_unlock_ovs:
1052 	ovs_unlock();
1053 	kfree_skb(reply);
1054 err_kfree_acts:
1055 	ovs_nla_free_flow_actions(acts);
1056 err_kfree_flow:
1057 	ovs_flow_free(new_flow, false);
1058 error:
1059 	return error;
1060 }
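
/*
 * Editorial note on the NLM_F_CREATE/NLM_F_EXCL quirk handled above:
 * NLM_F_EXCL shares its bit value with NLM_F_MATCH, so older Generic
 * Netlink code could mistake an "exclusive create" for a dump request.
 * Userspace therefore sends NLM_F_CREATE to mean "fail if the flow
 * already exists", and the kernel honours either flag with -EEXIST.
 */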
1061 
1062 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1063 static struct sw_flow_actions *get_flow_actions(struct net *net,
1064 						const struct nlattr *a,
1065 						const struct sw_flow_key *key,
1066 						const struct sw_flow_mask *mask,
1067 						bool log)
1068 {
1069 	struct sw_flow_actions *acts;
1070 	struct sw_flow_key masked_key;
1071 	int error;
1072 
1073 	ovs_flow_mask_key(&masked_key, key, true, mask);
1074 	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1075 	if (error) {
1076 		OVS_NLERR(log,
1077 			  "Actions may not be safe on all matching packets");
1078 		return ERR_PTR(error);
1079 	}
1080 
1081 	return acts;
1082 }
1083 
1084 /* Factor out match-init and action-copy to avoid
1085  * "Wframe-larger-than=1024" warning. Because the mask is only
1086  * used to get the actions, we add a new function to save some
1087  * stack space.
1088  *
1089  * If there are no key and action attrs, we return 0
1090  * directly. In that case, the caller will also not use the
1091  * match as before. If there is an action attr, we try to get
1092  * the actions and save them to *acts. Before returning from
1093  * the function, we reset the match->mask pointer, because we
1094  * should not return a match object with a dangling reference
1095  * to the mask.
1096  */
1097 static int ovs_nla_init_match_and_action(struct net *net,
1098 					 struct sw_flow_match *match,
1099 					 struct sw_flow_key *key,
1100 					 struct nlattr **a,
1101 					 struct sw_flow_actions **acts,
1102 					 bool log)
1103 {
1104 	struct sw_flow_mask mask;
1105 	int error = 0;
1106 
1107 	if (a[OVS_FLOW_ATTR_KEY]) {
1108 		ovs_match_init(match, key, true, &mask);
1109 		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1110 					  a[OVS_FLOW_ATTR_MASK], log);
1111 		if (error)
1112 			goto error;
1113 	}
1114 
1115 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
1116 		if (!a[OVS_FLOW_ATTR_KEY]) {
1117 			OVS_NLERR(log,
1118 				  "Flow key attribute not present in set flow.");
1119 			error = -EINVAL;
1120 			goto error;
1121 		}
1122 
1123 		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1124 					 &mask, log);
1125 		if (IS_ERR(*acts)) {
1126 			error = PTR_ERR(*acts);
1127 			goto error;
1128 		}
1129 	}
1130 
1131 	/* On success, error is 0. */
1132 error:
1133 	match->mask = NULL;
1134 	return error;
1135 }
1136 
1137 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1138 {
1139 	struct net *net = sock_net(skb->sk);
1140 	struct nlattr **a = info->attrs;
1141 	struct ovs_header *ovs_header = info->userhdr;
1142 	struct sw_flow_key key;
1143 	struct sw_flow *flow;
1144 	struct sk_buff *reply = NULL;
1145 	struct datapath *dp;
1146 	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1147 	struct sw_flow_match match;
1148 	struct sw_flow_id sfid;
1149 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1150 	int error = 0;
1151 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1152 	bool ufid_present;
1153 
1154 	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1155 	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
1156 		OVS_NLERR(log,
1157 			  "Flow set message rejected, Key attribute missing.");
1158 		return -EINVAL;
1159 	}
1160 
1161 	error = ovs_nla_init_match_and_action(net, &match, &key, a,
1162 					      &acts, log);
1163 	if (error)
1164 		goto error;
1165 
1166 	if (acts) {
1167 		/* We can allocate the reply before locking if we have acts. */
1168 		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1169 						ufid_flags);
1170 		if (IS_ERR(reply)) {
1171 			error = PTR_ERR(reply);
1172 			goto err_kfree_acts;
1173 		}
1174 	}
1175 
1176 	ovs_lock();
1177 	dp = get_dp(net, ovs_header->dp_ifindex);
1178 	if (unlikely(!dp)) {
1179 		error = -ENODEV;
1180 		goto err_unlock_ovs;
1181 	}
1182 	/* Check that the flow exists. */
1183 	if (ufid_present)
1184 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1185 	else
1186 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1187 	if (unlikely(!flow)) {
1188 		error = -ENOENT;
1189 		goto err_unlock_ovs;
1190 	}
1191 
1192 	/* Update actions, if present. */
1193 	if (likely(acts)) {
1194 		old_acts = ovsl_dereference(flow->sf_acts);
1195 		rcu_assign_pointer(flow->sf_acts, acts);
1196 
1197 		if (unlikely(reply)) {
1198 			error = ovs_flow_cmd_fill_info(flow,
1199 						       ovs_header->dp_ifindex,
1200 						       reply, info->snd_portid,
1201 						       info->snd_seq, 0,
1202 						       OVS_FLOW_CMD_SET,
1203 						       ufid_flags);
1204 			BUG_ON(error < 0);
1205 		}
1206 	} else {
1207 		/* Without acts we could not allocate the reply before locking. */
1208 		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1209 						info, OVS_FLOW_CMD_SET, false,
1210 						ufid_flags);
1211 
1212 		if (IS_ERR(reply)) {
1213 			error = PTR_ERR(reply);
1214 			goto err_unlock_ovs;
1215 		}
1216 	}
1217 
1218 	/* Clear stats. */
1219 	if (a[OVS_FLOW_ATTR_CLEAR])
1220 		ovs_flow_stats_clear(flow);
1221 	ovs_unlock();
1222 
1223 	if (reply)
1224 		ovs_notify(&dp_flow_genl_family, reply, info);
1225 	if (old_acts)
1226 		ovs_nla_free_flow_actions_rcu(old_acts);
1227 
1228 	return 0;
1229 
1230 err_unlock_ovs:
1231 	ovs_unlock();
1232 	kfree_skb(reply);
1233 err_kfree_acts:
1234 	ovs_nla_free_flow_actions(acts);
1235 error:
1236 	return error;
1237 }
1238 
1239 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1240 {
1241 	struct nlattr **a = info->attrs;
1242 	struct ovs_header *ovs_header = info->userhdr;
1243 	struct net *net = sock_net(skb->sk);
1244 	struct sw_flow_key key;
1245 	struct sk_buff *reply;
1246 	struct sw_flow *flow;
1247 	struct datapath *dp;
1248 	struct sw_flow_match match;
1249 	struct sw_flow_id ufid;
1250 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1251 	int err = 0;
1252 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1253 	bool ufid_present;
1254 
1255 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1256 	if (a[OVS_FLOW_ATTR_KEY]) {
1257 		ovs_match_init(&match, &key, true, NULL);
1258 		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1259 					log);
1260 	} else if (!ufid_present) {
1261 		OVS_NLERR(log,
1262 			  "Flow get message rejected, Key attribute missing.");
1263 		err = -EINVAL;
1264 	}
1265 	if (err)
1266 		return err;
1267 
1268 	ovs_lock();
1269 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1270 	if (!dp) {
1271 		err = -ENODEV;
1272 		goto unlock;
1273 	}
1274 
1275 	if (ufid_present)
1276 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1277 	else
1278 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1279 	if (!flow) {
1280 		err = -ENOENT;
1281 		goto unlock;
1282 	}
1283 
1284 	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1285 					OVS_FLOW_CMD_GET, true, ufid_flags);
1286 	if (IS_ERR(reply)) {
1287 		err = PTR_ERR(reply);
1288 		goto unlock;
1289 	}
1290 
1291 	ovs_unlock();
1292 	return genlmsg_reply(reply, info);
1293 unlock:
1294 	ovs_unlock();
1295 	return err;
1296 }
1297 
1298 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1299 {
1300 	struct nlattr **a = info->attrs;
1301 	struct ovs_header *ovs_header = info->userhdr;
1302 	struct net *net = sock_net(skb->sk);
1303 	struct sw_flow_key key;
1304 	struct sk_buff *reply;
1305 	struct sw_flow *flow = NULL;
1306 	struct datapath *dp;
1307 	struct sw_flow_match match;
1308 	struct sw_flow_id ufid;
1309 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1310 	int err;
1311 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1312 	bool ufid_present;
1313 
1314 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1315 	if (a[OVS_FLOW_ATTR_KEY]) {
1316 		ovs_match_init(&match, &key, true, NULL);
1317 		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1318 					NULL, log);
1319 		if (unlikely(err))
1320 			return err;
1321 	}
1322 
1323 	ovs_lock();
1324 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1325 	if (unlikely(!dp)) {
1326 		err = -ENODEV;
1327 		goto unlock;
1328 	}
1329 
1330 	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1331 		err = ovs_flow_tbl_flush(&dp->table);
1332 		goto unlock;
1333 	}
1334 
1335 	if (ufid_present)
1336 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1337 	else
1338 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1339 	if (unlikely(!flow)) {
1340 		err = -ENOENT;
1341 		goto unlock;
1342 	}
1343 
1344 	ovs_flow_tbl_remove(&dp->table, flow);
1345 	ovs_unlock();
1346 
1347 	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1348 					&flow->id, info, false, ufid_flags);
1349 	if (likely(reply)) {
1350 		if (likely(!IS_ERR(reply))) {
1351 			rcu_read_lock();	/* To keep RCU checker happy. */
1352 			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1353 						     reply, info->snd_portid,
1354 						     info->snd_seq, 0,
1355 						     OVS_FLOW_CMD_DEL,
1356 						     ufid_flags);
1357 			rcu_read_unlock();
1358 			BUG_ON(err < 0);
1359 
1360 			ovs_notify(&dp_flow_genl_family, reply, info);
1361 		} else {
1362 			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1363 		}
1364 	}
1365 
1366 	ovs_flow_free(flow, true);
1367 	return 0;
1368 unlock:
1369 	ovs_unlock();
1370 	return err;
1371 }
1372 
1373 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1374 {
1375 	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1376 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1377 	struct table_instance *ti;
1378 	struct datapath *dp;
1379 	u32 ufid_flags;
1380 	int err;
1381 
1382 	err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
1383 				       OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1384 	if (err)
1385 		return err;
1386 	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1387 
1388 	rcu_read_lock();
1389 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1390 	if (!dp) {
1391 		rcu_read_unlock();
1392 		return -ENODEV;
1393 	}
1394 
1395 	ti = rcu_dereference(dp->table.ti);
1396 	for (;;) {
1397 		struct sw_flow *flow;
1398 		u32 bucket, obj;
1399 
1400 		bucket = cb->args[0];
1401 		obj = cb->args[1];
1402 		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1403 		if (!flow)
1404 			break;
1405 
1406 		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1407 					   NETLINK_CB(cb->skb).portid,
1408 					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
1409 					   OVS_FLOW_CMD_GET, ufid_flags) < 0)
1410 			break;
1411 
1412 		cb->args[0] = bucket;
1413 		cb->args[1] = obj;
1414 	}
1415 	rcu_read_unlock();
1416 	return skb->len;
1417 }
1418 
1419 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1420 	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1421 	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1422 	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1423 	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1424 	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1425 	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1426 	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1427 };
1428 
1429 static const struct genl_ops dp_flow_genl_ops[] = {
1430 	{ .cmd = OVS_FLOW_CMD_NEW,
1431 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1432 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1433 	  .doit = ovs_flow_cmd_new
1434 	},
1435 	{ .cmd = OVS_FLOW_CMD_DEL,
1436 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1437 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1438 	  .doit = ovs_flow_cmd_del
1439 	},
1440 	{ .cmd = OVS_FLOW_CMD_GET,
1441 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1442 	  .flags = 0,		    /* OK for unprivileged users. */
1443 	  .doit = ovs_flow_cmd_get,
1444 	  .dumpit = ovs_flow_cmd_dump
1445 	},
1446 	{ .cmd = OVS_FLOW_CMD_SET,
1447 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1448 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1449 	  .doit = ovs_flow_cmd_set,
1450 	},
1451 };
1452 
1453 static struct genl_family dp_flow_genl_family __ro_after_init = {
1454 	.hdrsize = sizeof(struct ovs_header),
1455 	.name = OVS_FLOW_FAMILY,
1456 	.version = OVS_FLOW_VERSION,
1457 	.maxattr = OVS_FLOW_ATTR_MAX,
1458 	.policy = flow_policy,
1459 	.netnsok = true,
1460 	.parallel_ops = true,
1461 	.ops = dp_flow_genl_ops,
1462 	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1463 	.mcgrps = &ovs_dp_flow_multicast_group,
1464 	.n_mcgrps = 1,
1465 	.module = THIS_MODULE,
1466 };
1467 
1468 static size_t ovs_dp_cmd_msg_size(void)
1469 {
1470 	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1471 
1472 	msgsize += nla_total_size(IFNAMSIZ);
1473 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1474 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1475 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1476 
1477 	return msgsize;
1478 }
1479 
1480 /* Called with ovs_mutex. */
1481 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1482 				u32 portid, u32 seq, u32 flags, u8 cmd)
1483 {
1484 	struct ovs_header *ovs_header;
1485 	struct ovs_dp_stats dp_stats;
1486 	struct ovs_dp_megaflow_stats dp_megaflow_stats;
1487 	int err;
1488 
1489 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1490 				   flags, cmd);
1491 	if (!ovs_header)
1492 		goto error;
1493 
1494 	ovs_header->dp_ifindex = get_dpifindex(dp);
1495 
1496 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1497 	if (err)
1498 		goto nla_put_failure;
1499 
1500 	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1501 	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1502 			  &dp_stats, OVS_DP_ATTR_PAD))
1503 		goto nla_put_failure;
1504 
1505 	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1506 			  sizeof(struct ovs_dp_megaflow_stats),
1507 			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1508 		goto nla_put_failure;
1509 
1510 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1511 		goto nla_put_failure;
1512 
1513 	genlmsg_end(skb, ovs_header);
1514 	return 0;
1515 
1516 nla_put_failure:
1517 	genlmsg_cancel(skb, ovs_header);
1518 error:
1519 	return -EMSGSIZE;
1520 }
1521 
1522 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1523 {
1524 	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1525 }
1526 
1527 /* Called with rcu_read_lock or ovs_mutex. */
1528 static struct datapath *lookup_datapath(struct net *net,
1529 					const struct ovs_header *ovs_header,
1530 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1531 {
1532 	struct datapath *dp;
1533 
1534 	if (!a[OVS_DP_ATTR_NAME])
1535 		dp = get_dp(net, ovs_header->dp_ifindex);
1536 	else {
1537 		struct vport *vport;
1538 
1539 		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1540 		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1541 	}
1542 	return dp ? dp : ERR_PTR(-ENODEV);
1543 }
1544 
1545 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1546 {
1547 	struct datapath *dp;
1548 
1549 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1550 	if (IS_ERR(dp))
1551 		return;
1552 
1553 	WARN(dp->user_features, "Dropping previously announced user features\n");
1554 	dp->user_features = 0;
1555 }
1556 
1557 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1558 {
1559 	if (a[OVS_DP_ATTR_USER_FEATURES])
1560 		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1561 }
1562 
1563 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1564 {
1565 	struct nlattr **a = info->attrs;
1566 	struct vport_parms parms;
1567 	struct sk_buff *reply;
1568 	struct datapath *dp;
1569 	struct vport *vport;
1570 	struct ovs_net *ovs_net;
1571 	int err, i;
1572 
1573 	err = -EINVAL;
1574 	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1575 		goto err;
1576 
1577 	reply = ovs_dp_cmd_alloc_info();
1578 	if (!reply)
1579 		return -ENOMEM;
1580 
1581 	err = -ENOMEM;
1582 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1583 	if (dp == NULL)
1584 		goto err_free_reply;
1585 
1586 	ovs_dp_set_net(dp, sock_net(skb->sk));
1587 
1588 	/* Allocate table. */
1589 	err = ovs_flow_tbl_init(&dp->table);
1590 	if (err)
1591 		goto err_free_dp;
1592 
1593 	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1594 	if (!dp->stats_percpu) {
1595 		err = -ENOMEM;
1596 		goto err_destroy_table;
1597 	}
1598 
1599 	dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1600 				  sizeof(struct hlist_head),
1601 				  GFP_KERNEL);
1602 	if (!dp->ports) {
1603 		err = -ENOMEM;
1604 		goto err_destroy_percpu;
1605 	}
1606 
1607 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1608 		INIT_HLIST_HEAD(&dp->ports[i]);
1609 
1610 	err = ovs_meters_init(dp);
1611 	if (err)
1612 		goto err_destroy_ports_array;
1613 
1614 	/* Set up our datapath device. */
1615 	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1616 	parms.type = OVS_VPORT_TYPE_INTERNAL;
1617 	parms.options = NULL;
1618 	parms.dp = dp;
1619 	parms.port_no = OVSP_LOCAL;
1620 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1621 
1622 	ovs_dp_change(dp, a);
1623 
1624 	/* So far only local changes have been made; now we need the lock. */
1625 	ovs_lock();
1626 
1627 	vport = new_vport(&parms);
1628 	if (IS_ERR(vport)) {
1629 		err = PTR_ERR(vport);
1630 		if (err == -EBUSY)
1631 			err = -EEXIST;
1632 
1633 		if (err == -EEXIST) {
1634 			/* An outdated user space instance that does not understand
1635 			 * the concept of user_features has attempted to create a new
1636 			 * datapath and is likely to reuse it. Drop all user features.
1637 			 */
1638 			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1639 				ovs_dp_reset_user_features(skb, info);
1640 		}
1641 
1642 		goto err_destroy_meters;
1643 	}
1644 
1645 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1646 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1647 	BUG_ON(err < 0);
1648 
1649 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1650 	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1651 
1652 	ovs_unlock();
1653 
1654 	ovs_notify(&dp_datapath_genl_family, reply, info);
1655 	return 0;
1656 
1657 err_destroy_meters:
1658 	ovs_unlock();
1659 	ovs_meters_exit(dp);
1660 err_destroy_ports_array:
1661 	kfree(dp->ports);
1662 err_destroy_percpu:
1663 	free_percpu(dp->stats_percpu);
1664 err_destroy_table:
1665 	ovs_flow_tbl_destroy(&dp->table);
1666 err_free_dp:
1667 	kfree(dp);
1668 err_free_reply:
1669 	kfree_skb(reply);
1670 err:
1671 	return err;
1672 }
1673 
1674 /* Called with ovs_mutex. */
1675 static void __dp_destroy(struct datapath *dp)
1676 {
1677 	int i;
1678 
1679 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1680 		struct vport *vport;
1681 		struct hlist_node *n;
1682 
1683 		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1684 			if (vport->port_no != OVSP_LOCAL)
1685 				ovs_dp_detach_port(vport);
1686 	}
1687 
1688 	list_del_rcu(&dp->list_node);
1689 
1690 	/* OVSP_LOCAL is the datapath's internal port. We need to make sure
1691 	 * that all ports in the datapath are destroyed before freeing it.
1692 	 */
1693 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1694 
1695 	/* RCU destroy the flow table */
1696 	call_rcu(&dp->rcu, destroy_dp_rcu);
1697 }
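
/*
 * Editorial note: teardown is two-phase.  Under ovs_lock all non-local
 * ports are detached, the datapath is unlinked from the per-netns list and
 * the internal port is removed; the actual frees (flow table, per-CPU
 * stats, ports array, meters) happen later in destroy_dp_rcu(), once an
 * RCU grace period guarantees no reader still holds a reference.
 */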
1698 
1699 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1700 {
1701 	struct sk_buff *reply;
1702 	struct datapath *dp;
1703 	int err;
1704 
1705 	reply = ovs_dp_cmd_alloc_info();
1706 	if (!reply)
1707 		return -ENOMEM;
1708 
1709 	ovs_lock();
1710 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1711 	err = PTR_ERR(dp);
1712 	if (IS_ERR(dp))
1713 		goto err_unlock_free;
1714 
1715 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1716 				   info->snd_seq, 0, OVS_DP_CMD_DEL);
1717 	BUG_ON(err < 0);
1718 
1719 	__dp_destroy(dp);
1720 	ovs_unlock();
1721 
1722 	ovs_notify(&dp_datapath_genl_family, reply, info);
1723 
1724 	return 0;
1725 
1726 err_unlock_free:
1727 	ovs_unlock();
1728 	kfree_skb(reply);
1729 	return err;
1730 }
1731 
1732 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1733 {
1734 	struct sk_buff *reply;
1735 	struct datapath *dp;
1736 	int err;
1737 
1738 	reply = ovs_dp_cmd_alloc_info();
1739 	if (!reply)
1740 		return -ENOMEM;
1741 
1742 	ovs_lock();
1743 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1744 	err = PTR_ERR(dp);
1745 	if (IS_ERR(dp))
1746 		goto err_unlock_free;
1747 
1748 	ovs_dp_change(dp, info->attrs);
1749 
1750 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1751 				   info->snd_seq, 0, OVS_DP_CMD_SET);
1752 	BUG_ON(err < 0);
1753 
1754 	ovs_unlock();
1755 	ovs_notify(&dp_datapath_genl_family, reply, info);
1756 
1757 	return 0;
1758 
1759 err_unlock_free:
1760 	ovs_unlock();
1761 	kfree_skb(reply);
1762 	return err;
1763 }
1764 
1765 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1766 {
1767 	struct sk_buff *reply;
1768 	struct datapath *dp;
1769 	int err;
1770 
1771 	reply = ovs_dp_cmd_alloc_info();
1772 	if (!reply)
1773 		return -ENOMEM;
1774 
1775 	ovs_lock();
1776 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1777 	if (IS_ERR(dp)) {
1778 		err = PTR_ERR(dp);
1779 		goto err_unlock_free;
1780 	}
1781 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1782 				   info->snd_seq, 0, OVS_DP_CMD_GET);
1783 	BUG_ON(err < 0);
1784 	ovs_unlock();
1785 
1786 	return genlmsg_reply(reply, info);
1787 
1788 err_unlock_free:
1789 	ovs_unlock();
1790 	kfree_skb(reply);
1791 	return err;
1792 }
1793 
1794 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1795 {
1796 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1797 	struct datapath *dp;
1798 	int skip = cb->args[0];
1799 	int i = 0;
1800 
1801 	ovs_lock();
1802 	list_for_each_entry(dp, &ovs_net->dps, list_node) {
1803 		if (i >= skip &&
1804 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1805 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1806 					 OVS_DP_CMD_GET) < 0)
1807 			break;
1808 		i++;
1809 	}
1810 	ovs_unlock();
1811 
1812 	cb->args[0] = i;
1813 
1814 	return skb->len;
1815 }
1816 
1817 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1818 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1819 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1820 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1821 };
1822 
1823 static const struct genl_ops dp_datapath_genl_ops[] = {
1824 	{ .cmd = OVS_DP_CMD_NEW,
1825 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1826 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1827 	  .doit = ovs_dp_cmd_new
1828 	},
1829 	{ .cmd = OVS_DP_CMD_DEL,
1830 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1831 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1832 	  .doit = ovs_dp_cmd_del
1833 	},
1834 	{ .cmd = OVS_DP_CMD_GET,
1835 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1836 	  .flags = 0,		    /* OK for unprivileged users. */
1837 	  .doit = ovs_dp_cmd_get,
1838 	  .dumpit = ovs_dp_cmd_dump
1839 	},
1840 	{ .cmd = OVS_DP_CMD_SET,
1841 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1842 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1843 	  .doit = ovs_dp_cmd_set,
1844 	},
1845 };
1846 
1847 static struct genl_family dp_datapath_genl_family __ro_after_init = {
1848 	.hdrsize = sizeof(struct ovs_header),
1849 	.name = OVS_DATAPATH_FAMILY,
1850 	.version = OVS_DATAPATH_VERSION,
1851 	.maxattr = OVS_DP_ATTR_MAX,
1852 	.policy = datapath_policy,
1853 	.netnsok = true,
1854 	.parallel_ops = true,
1855 	.ops = dp_datapath_genl_ops,
1856 	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1857 	.mcgrps = &ovs_dp_datapath_multicast_group,
1858 	.n_mcgrps = 1,
1859 	.module = THIS_MODULE,
1860 };
1861 
1862 /* Called with ovs_mutex or RCU read lock. */
1863 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1864 				   struct net *net, u32 portid, u32 seq,
1865 				   u32 flags, u8 cmd)
1866 {
1867 	struct ovs_header *ovs_header;
1868 	struct ovs_vport_stats vport_stats;
1869 	int err;
1870 
1871 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1872 				 flags, cmd);
1873 	if (!ovs_header)
1874 		return -EMSGSIZE;
1875 
1876 	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1877 
1878 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1879 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1880 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1881 			   ovs_vport_name(vport)) ||
1882 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
1883 		goto nla_put_failure;
1884 
1885 	if (!net_eq(net, dev_net(vport->dev))) {
1886 		int id = peernet2id_alloc(net, dev_net(vport->dev));
1887 
1888 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1889 			goto nla_put_failure;
1890 	}
1891 
1892 	ovs_vport_get_stats(vport, &vport_stats);
1893 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1894 			  sizeof(struct ovs_vport_stats), &vport_stats,
1895 			  OVS_VPORT_ATTR_PAD))
1896 		goto nla_put_failure;
1897 
1898 	if (ovs_vport_get_upcall_portids(vport, skb))
1899 		goto nla_put_failure;
1900 
1901 	err = ovs_vport_get_options(vport, skb);
1902 	if (err == -EMSGSIZE)
1903 		goto error;
1904 
1905 	genlmsg_end(skb, ovs_header);
1906 	return 0;
1907 
1908 nla_put_failure:
1909 	err = -EMSGSIZE;
1910 error:
1911 	genlmsg_cancel(skb, ovs_header);
1912 	return err;
1913 }
1914 
1915 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1916 {
1917 	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1918 }
1919 
1920 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1921 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
1922 					 u32 portid, u32 seq, u8 cmd)
1923 {
1924 	struct sk_buff *skb;
1925 	int retval;
1926 
1927 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1928 	if (!skb)
1929 		return ERR_PTR(-ENOMEM);
1930 
1931 	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
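	/* The freshly allocated NLMSG_DEFAULT_SIZE buffer is always large
	 * enough for a single vport message, so a failure here is a bug.
	 */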
1932 	BUG_ON(retval < 0);
1933 
1934 	return skb;
1935 }
1936 
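/* Resolve a vport either by OVS_VPORT_ATTR_NAME or by the
 * dp_ifindex/OVS_VPORT_ATTR_PORT_NO pair; addressing a port by
 * OVS_VPORT_ATTR_IFINDEX is not supported.
 */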
1937 /* Called with ovs_mutex or RCU read lock. */
1938 static struct vport *lookup_vport(struct net *net,
1939 				  const struct ovs_header *ovs_header,
1940 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1941 {
1942 	struct datapath *dp;
1943 	struct vport *vport;
1944 
1945 	if (a[OVS_VPORT_ATTR_IFINDEX])
1946 		return ERR_PTR(-EOPNOTSUPP);
1947 	if (a[OVS_VPORT_ATTR_NAME]) {
1948 		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1949 		if (!vport)
1950 			return ERR_PTR(-ENODEV);
1951 		if (ovs_header->dp_ifindex &&
1952 		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1953 			return ERR_PTR(-ENODEV);
1954 		return vport;
1955 	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1956 		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1957 
1958 		if (port_no >= DP_MAX_PORTS)
1959 			return ERR_PTR(-EFBIG);
1960 
1961 		dp = get_dp(net, ovs_header->dp_ifindex);
1962 		if (!dp)
1963 			return ERR_PTR(-ENODEV);
1964 
1965 		vport = ovs_vport_ovsl_rcu(dp, port_no);
1966 		if (!vport)
1967 			return ERR_PTR(-ENODEV);
1968 		return vport;
1969 	} else {
1970 		return ERR_PTR(-EINVAL);
1971 	}
1972 }
1973 
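/* Recompute the datapath's maximum headroom as the maximum forwarding
 * headroom over all attached vports, then propagate the result to every
 * vport's underlying device.
 */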
1974 /* Called with ovs_mutex */
1975 static void update_headroom(struct datapath *dp)
1976 {
1977 	unsigned int dev_headroom, max_headroom = 0;
1978 	struct net_device *dev;
1979 	struct vport *vport;
1980 	int i;
1981 
1982 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1983 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1984 			dev = vport->dev;
1985 			dev_headroom = netdev_get_fwd_headroom(dev);
1986 			if (dev_headroom > max_headroom)
1987 				max_headroom = dev_headroom;
1988 		}
1989 	}
1990 
1991 	dp->max_headroom = max_headroom;
1992 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1993 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1994 			netdev_set_rx_headroom(vport->dev, max_headroom);
1995 }
1996 
1997 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1998 {
1999 	struct nlattr **a = info->attrs;
2000 	struct ovs_header *ovs_header = info->userhdr;
2001 	struct vport_parms parms;
2002 	struct sk_buff *reply;
2003 	struct vport *vport;
2004 	struct datapath *dp;
2005 	u32 port_no;
2006 	int err;
2007 
2008 	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2009 	    !a[OVS_VPORT_ATTR_UPCALL_PID])
2010 		return -EINVAL;
2011 	if (a[OVS_VPORT_ATTR_IFINDEX])
2012 		return -EOPNOTSUPP;
2013 
2014 	port_no = a[OVS_VPORT_ATTR_PORT_NO]
2015 		? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2016 	if (port_no >= DP_MAX_PORTS)
2017 		return -EFBIG;
2018 
2019 	reply = ovs_vport_cmd_alloc_info();
2020 	if (!reply)
2021 		return -ENOMEM;
2022 
2023 	ovs_lock();
2024 restart:
2025 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2026 	err = -ENODEV;
2027 	if (!dp)
2028 		goto exit_unlock_free;
2029 
2030 	if (port_no) {
2031 		vport = ovs_vport_ovsl(dp, port_no);
2032 		err = -EBUSY;
2033 		if (vport)
2034 			goto exit_unlock_free;
2035 	} else {
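		/* No port number was requested: claim the lowest free
		 * one, starting just above the local port (port 0).
		 */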
2036 		for (port_no = 1; ; port_no++) {
2037 			if (port_no >= DP_MAX_PORTS) {
2038 				err = -EFBIG;
2039 				goto exit_unlock_free;
2040 			}
2041 			vport = ovs_vport_ovsl(dp, port_no);
2042 			if (!vport)
2043 				break;
2044 		}
2045 	}
2046 
2047 	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2048 	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2049 	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2050 	parms.dp = dp;
2051 	parms.port_no = port_no;
2052 	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2053 
2054 	vport = new_vport(&parms);
2055 	err = PTR_ERR(vport);
2056 	if (IS_ERR(vport)) {
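		/* new_vport() returns -EAGAIN after it has loaded the
		 * matching vport module, so retry the creation.
		 */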
2057 		if (err == -EAGAIN)
2058 			goto restart;
2059 		goto exit_unlock_free;
2060 	}
2061 
2062 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2063 				      info->snd_portid, info->snd_seq, 0,
2064 				      OVS_VPORT_CMD_NEW);
2065 
2066 	if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2067 		update_headroom(dp);
2068 	else
2069 		netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2070 
2071 	BUG_ON(err < 0);
2072 	ovs_unlock();
2073 
2074 	ovs_notify(&dp_vport_genl_family, reply, info);
2075 	return 0;
2076 
2077 exit_unlock_free:
2078 	ovs_unlock();
2079 	kfree_skb(reply);
2080 	return err;
2081 }
2082 
2083 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2084 {
2085 	struct nlattr **a = info->attrs;
2086 	struct sk_buff *reply;
2087 	struct vport *vport;
2088 	int err;
2089 
2090 	reply = ovs_vport_cmd_alloc_info();
2091 	if (!reply)
2092 		return -ENOMEM;
2093 
2094 	ovs_lock();
2095 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2096 	err = PTR_ERR(vport);
2097 	if (IS_ERR(vport))
2098 		goto exit_unlock_free;
2099 
2100 	if (a[OVS_VPORT_ATTR_TYPE] &&
2101 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2102 		err = -EINVAL;
2103 		goto exit_unlock_free;
2104 	}
2105 
2106 	if (a[OVS_VPORT_ATTR_OPTIONS]) {
2107 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2108 		if (err)
2109 			goto exit_unlock_free;
2110 	}
2111 
2113 	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2114 		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2115 
2116 		err = ovs_vport_set_upcall_portids(vport, ids);
2117 		if (err)
2118 			goto exit_unlock_free;
2119 	}
2120 
2121 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2122 				      info->snd_portid, info->snd_seq, 0,
2123 				      OVS_VPORT_CMD_SET);
2124 	BUG_ON(err < 0);
2125 
2126 	ovs_unlock();
2127 	ovs_notify(&dp_vport_genl_family, reply, info);
2128 	return 0;
2129 
2130 exit_unlock_free:
2131 	ovs_unlock();
2132 	kfree_skb(reply);
2133 	return err;
2134 }
2135 
2136 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2137 {
2138 	bool must_update_headroom = false;
2139 	struct nlattr **a = info->attrs;
2140 	struct sk_buff *reply;
2141 	struct datapath *dp;
2142 	struct vport *vport;
2143 	int err;
2144 
2145 	reply = ovs_vport_cmd_alloc_info();
2146 	if (!reply)
2147 		return -ENOMEM;
2148 
2149 	ovs_lock();
2150 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2151 	err = PTR_ERR(vport);
2152 	if (IS_ERR(vport))
2153 		goto exit_unlock_free;
2154 
2155 	if (vport->port_no == OVSP_LOCAL) {
2156 		err = -EINVAL;
2157 		goto exit_unlock_free;
2158 	}
2159 
2160 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2161 				      info->snd_portid, info->snd_seq, 0,
2162 				      OVS_VPORT_CMD_DEL);
2163 	BUG_ON(err < 0);
2164 
2165 	/* Deleting this vport may require recomputing the datapath's
2166 	 * max_headroom. */
2166 	dp = vport->dp;
2167 	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2168 		must_update_headroom = true;
2169 	netdev_reset_rx_headroom(vport->dev);
2170 	ovs_dp_detach_port(vport);
2171 
2172 	if (must_update_headroom)
2173 		update_headroom(dp);
2174 	ovs_unlock();
2175 
2176 	ovs_notify(&dp_vport_genl_family, reply, info);
2177 	return 0;
2178 
2179 exit_unlock_free:
2180 	ovs_unlock();
2181 	kfree_skb(reply);
2182 	return err;
2183 }
2184 
2185 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2186 {
2187 	struct nlattr **a = info->attrs;
2188 	struct ovs_header *ovs_header = info->userhdr;
2189 	struct sk_buff *reply;
2190 	struct vport *vport;
2191 	int err;
2192 
2193 	reply = ovs_vport_cmd_alloc_info();
2194 	if (!reply)
2195 		return -ENOMEM;
2196 
2197 	rcu_read_lock();
2198 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2199 	err = PTR_ERR(vport);
2200 	if (IS_ERR(vport))
2201 		goto exit_unlock_free;
2202 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2203 				      info->snd_portid, info->snd_seq, 0,
2204 				      OVS_VPORT_CMD_GET);
2205 	BUG_ON(err < 0);
2206 	rcu_read_unlock();
2207 
2208 	return genlmsg_reply(reply, info);
2209 
2210 exit_unlock_free:
2211 	rcu_read_unlock();
2212 	kfree_skb(reply);
2213 	return err;
2214 }
2215 
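/* Dump all vports of one datapath.  cb->args[0] is the hash bucket and
 * cb->args[1] the index within that bucket at which the previous pass
 * stopped, so an interrupted dump resumes where it left off.
 */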
2216 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2217 {
2218 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2219 	struct datapath *dp;
2220 	int bucket = cb->args[0], skip = cb->args[1];
2221 	int i, j = 0;
2222 
2223 	rcu_read_lock();
2224 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2225 	if (!dp) {
2226 		rcu_read_unlock();
2227 		return -ENODEV;
2228 	}
2229 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2230 		struct vport *vport;
2231 
2232 		j = 0;
2233 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2234 			if (j >= skip &&
2235 			    ovs_vport_cmd_fill_info(vport, skb,
2236 						    sock_net(skb->sk),
2237 						    NETLINK_CB(cb->skb).portid,
2238 						    cb->nlh->nlmsg_seq,
2239 						    NLM_F_MULTI,
2240 						    OVS_VPORT_CMD_GET) < 0)
2241 				goto out;
2242 
2243 			j++;
2244 		}
2245 		skip = 0;
2246 	}
2247 out:
2248 	rcu_read_unlock();
2249 
2250 	cb->args[0] = i;
2251 	cb->args[1] = j;
2252 
2253 	return skb->len;
2254 }
2255 
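/* Policy for OVS_VPORT_* requests.  A minimal OVS_VPORT_CMD_NEW carries
 * OVS_VPORT_ATTR_NAME, OVS_VPORT_ATTR_TYPE and OVS_VPORT_ATTR_UPCALL_PID;
 * OVS_VPORT_ATTR_PORT_NO is optional, and leaving it out (or zero) lets
 * the kernel pick the lowest free port number.
 */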
2256 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2257 	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2258 	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2259 	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2260 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2261 	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2262 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2263 	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2264 	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2265 };
2266 
2267 static const struct genl_ops dp_vport_genl_ops[] = {
2268 	{ .cmd = OVS_VPORT_CMD_NEW,
2269 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2270 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2271 	  .doit = ovs_vport_cmd_new
2272 	},
2273 	{ .cmd = OVS_VPORT_CMD_DEL,
2274 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2275 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2276 	  .doit = ovs_vport_cmd_del
2277 	},
2278 	{ .cmd = OVS_VPORT_CMD_GET,
2279 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2280 	  .flags = 0,		    /* OK for unprivileged users. */
2281 	  .doit = ovs_vport_cmd_get,
2282 	  .dumpit = ovs_vport_cmd_dump
2283 	},
2284 	{ .cmd = OVS_VPORT_CMD_SET,
2285 	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2286 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2287 	  .doit = ovs_vport_cmd_set,
2288 	},
2289 };
2290 
2291 struct genl_family dp_vport_genl_family __ro_after_init = {
2292 	.hdrsize = sizeof(struct ovs_header),
2293 	.name = OVS_VPORT_FAMILY,
2294 	.version = OVS_VPORT_VERSION,
2295 	.maxattr = OVS_VPORT_ATTR_MAX,
2296 	.policy = vport_policy,
2297 	.netnsok = true,
2298 	.parallel_ops = true,
2299 	.ops = dp_vport_genl_ops,
2300 	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2301 	.mcgrps = &ovs_dp_vport_multicast_group,
2302 	.n_mcgrps = 1,
2303 	.module = THIS_MODULE,
2304 };
2305 
2306 static struct genl_family * const dp_genl_families[] = {
2307 	&dp_datapath_genl_family,
2308 	&dp_vport_genl_family,
2309 	&dp_flow_genl_family,
2310 	&dp_packet_genl_family,
2311 	&dp_meter_genl_family,
2312 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2313 	&dp_ct_limit_genl_family,
2314 #endif
2315 };
2316 
2317 static void dp_unregister_genl(int n_families)
2318 {
2319 	int i;
2320 
2321 	for (i = 0; i < n_families; i++)
2322 		genl_unregister_family(dp_genl_families[i]);
2323 }
2324 
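/* Register every generic netlink family.  On failure, unregister the
 * families that were already registered and return the error.
 */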
2325 static int __init dp_register_genl(void)
2326 {
2327 	int err;
2328 	int i;
2329 
2330 	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2332 		err = genl_register_family(dp_genl_families[i]);
2333 		if (err)
2334 			goto error;
2335 	}
2336 
2337 	return 0;
2338 
2339 error:
2340 	dp_unregister_genl(i);
2341 	return err;
2342 }
2343 
2344 static int __net_init ovs_init_net(struct net *net)
2345 {
2346 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2347 
2348 	INIT_LIST_HEAD(&ovs_net->dps);
2349 	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2350 	return ovs_ct_init(net);
2351 }
2352 
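/* Collect on @head the internal-dev vports of @net whose backing device
 * lives in the exiting namespace @dnet.
 */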
2353 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2354 					    struct list_head *head)
2355 {
2356 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2357 	struct datapath *dp;
2358 
2359 	list_for_each_entry(dp, &ovs_net->dps, list_node) {
2360 		int i;
2361 
2362 		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2363 			struct vport *vport;
2364 
2365 			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2366 				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2367 					continue;
2368 
2369 				if (dev_net(vport->dev) == dnet)
2370 					list_add(&vport->detach_list, head);
2371 			}
2372 		}
2373 	}
2374 }
2375 
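/* Namespace teardown: destroy every datapath in the exiting namespace,
 * then detach any internal vports elsewhere whose backing device lives
 * in it.
 */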
2376 static void __net_exit ovs_exit_net(struct net *dnet)
2377 {
2378 	struct datapath *dp, *dp_next;
2379 	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2380 	struct vport *vport, *vport_next;
2381 	struct net *net;
2382 	LIST_HEAD(head);
2383 
2384 	ovs_ct_exit(dnet);
2385 	ovs_lock();
2386 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2387 		__dp_destroy(dp);
2388 
2389 	down_read(&net_rwsem);
2390 	for_each_net(net)
2391 		list_vports_from_net(net, dnet, &head);
2392 	up_read(&net_rwsem);
2393 
2394 	/* Detach all vports from given namespace. */
2395 	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2396 		list_del(&vport->detach_list);
2397 		ovs_dp_detach_port(vport);
2398 	}
2399 
2400 	ovs_unlock();
2401 
2402 	cancel_work_sync(&ovs_net->dp_notify_work);
2403 }
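/* Per-network-namespace init/exit hooks for the ovs_net state. */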
2404 
2405 static struct pernet_operations ovs_net_ops = {
2406 	.init = ovs_init_net,
2407 	.exit = ovs_exit_net,
2408 	.id   = &ovs_net_id,
2409 	.size = sizeof(struct ovs_net),
2410 };
2411 
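/* Module init.  Each registration step has a matching error label, so a
 * failure unwinds everything registered before it in reverse order.
 */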
2412 static int __init dp_init(void)
2413 {
2414 	int err;
2415 
2416 	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2417 
2418 	pr_info("Open vSwitch switching datapath\n");
2419 
2420 	err = action_fifos_init();
2421 	if (err)
2422 		goto error;
2423 
2424 	err = ovs_internal_dev_rtnl_link_register();
2425 	if (err)
2426 		goto error_action_fifos_exit;
2427 
2428 	err = ovs_flow_init();
2429 	if (err)
2430 		goto error_unreg_rtnl_link;
2431 
2432 	err = ovs_vport_init();
2433 	if (err)
2434 		goto error_flow_exit;
2435 
2436 	err = register_pernet_device(&ovs_net_ops);
2437 	if (err)
2438 		goto error_vport_exit;
2439 
2440 	err = register_netdevice_notifier(&ovs_dp_device_notifier);
2441 	if (err)
2442 		goto error_netns_exit;
2443 
2444 	err = ovs_netdev_init();
2445 	if (err)
2446 		goto error_unreg_notifier;
2447 
2448 	err = dp_register_genl();
2449 	if (err < 0)
2450 		goto error_unreg_netdev;
2451 
2452 	return 0;
2453 
2454 error_unreg_netdev:
2455 	ovs_netdev_exit();
2456 error_unreg_notifier:
2457 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2458 error_netns_exit:
2459 	unregister_pernet_device(&ovs_net_ops);
2460 error_vport_exit:
2461 	ovs_vport_exit();
2462 error_flow_exit:
2463 	ovs_flow_exit();
2464 error_unreg_rtnl_link:
2465 	ovs_internal_dev_rtnl_link_unregister();
2466 error_action_fifos_exit:
2467 	action_fifos_exit();
2468 error:
2469 	return err;
2470 }
2471 
2472 static void dp_cleanup(void)
2473 {
2474 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2475 	ovs_netdev_exit();
2476 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2477 	unregister_pernet_device(&ovs_net_ops);
2478 	rcu_barrier();
2479 	ovs_vport_exit();
2480 	ovs_flow_exit();
2481 	ovs_internal_dev_rtnl_link_unregister();
2482 	action_fifos_exit();
2483 }
2484 
2485 module_init(dp_init);
2486 module_exit(dp_cleanup);
2487 
2488 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2489 MODULE_LICENSE("GPL");
2490 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2491 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2492 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2493 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2494 MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2495 MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);
2496