xref: /openbmc/linux/net/openvswitch/datapath.c (revision af958a38)
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53 
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60 
61 int ovs_net_id __read_mostly;
62 
63 static struct genl_family dp_packet_genl_family;
64 static struct genl_family dp_flow_genl_family;
65 static struct genl_family dp_datapath_genl_family;
66 
67 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
68 	.name = OVS_FLOW_MCGROUP,
69 };
70 
71 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
72 	.name = OVS_DATAPATH_MCGROUP,
73 };
74 
75 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
76 	.name = OVS_VPORT_MCGROUP,
77 };
78 
79 /* Check whether we need to build a reply message.
80  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
81 static bool ovs_must_notify(struct genl_info *info,
82 			    const struct genl_multicast_group *grp)
83 {
84 	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
85 		netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
86 }
87 
88 static void ovs_notify(struct genl_family *family,
89 		       struct sk_buff *skb, struct genl_info *info)
90 {
91 	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
92 		    0, info->nlhdr, GFP_KERNEL);
93 }
94 
95 /**
96  * DOC: Locking:
97  *
98  * All writes, whether to device state (add/remove datapath, port, set
99  * operations on vports, etc.) or to other state (flow table
100  * modifications, miscellaneous datapath parameter changes, etc.), are
101  * protected by ovs_mutex.
102  *
103  * Reads are protected by RCU.
104  *
105  * There are a few special cases (mostly stats) that have their own
106  * synchronization but they nest under all of above and don't interact with
107  * each other.
108  *
109  * The RTNL lock nests inside ovs_mutex.
110  */
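
/* A minimal sketch of the two access patterns described above (get_dp()
 * is the real helper defined below; the surrounding lines are
 * illustrative only):
 *
 *	ovs_lock();
 *	... add/remove ports, modify the flow table ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	dp = get_dp(net, dp_ifindex);	(reader side: lookups only)
 *	rcu_read_unlock();
 */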
111 
112 static DEFINE_MUTEX(ovs_mutex);
113 
114 void ovs_lock(void)
115 {
116 	mutex_lock(&ovs_mutex);
117 }
118 
119 void ovs_unlock(void)
120 {
121 	mutex_unlock(&ovs_mutex);
122 }
123 
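/* Backs the ovsl_dereference()/ASSERT_OVSL() checks used throughout this
 * file.  Once lockdep has disabled itself (debug_locks == 0) the answer
 * is no longer meaningful, so report the lock as held to avoid false
 * positives.
 */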
124 #ifdef CONFIG_LOCKDEP
125 int lockdep_ovsl_is_held(void)
126 {
127 	if (debug_locks)
128 		return lockdep_is_held(&ovs_mutex);
129 	else
130 		return 1;
131 }
132 #endif
133 
134 static struct vport *new_vport(const struct vport_parms *);
135 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
136 			     const struct dp_upcall_info *);
137 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
138 				  const struct dp_upcall_info *);
139 
140 /* Must be called with rcu_read_lock or ovs_mutex. */
141 static struct datapath *get_dp(struct net *net, int dp_ifindex)
142 {
143 	struct datapath *dp = NULL;
144 	struct net_device *dev;
145 
146 	rcu_read_lock();
147 	dev = dev_get_by_index_rcu(net, dp_ifindex);
148 	if (dev) {
149 		struct vport *vport = ovs_internal_dev_get_vport(dev);
150 		if (vport)
151 			dp = vport->dp;
152 	}
153 	rcu_read_unlock();
154 
155 	return dp;
156 }
157 
158 /* Must be called with rcu_read_lock or ovs_mutex. */
159 static const char *ovs_dp_name(const struct datapath *dp)
160 {
161 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
162 	return vport->ops->get_name(vport);
163 }
164 
165 static int get_dpifindex(struct datapath *dp)
166 {
167 	struct vport *local;
168 	int ifindex;
169 
170 	rcu_read_lock();
171 
172 	local = ovs_vport_rcu(dp, OVSP_LOCAL);
173 	if (local)
174 		ifindex = netdev_vport_priv(local)->dev->ifindex;
175 	else
176 		ifindex = 0;
177 
178 	rcu_read_unlock();
179 
180 	return ifindex;
181 }
182 
183 static void destroy_dp_rcu(struct rcu_head *rcu)
184 {
185 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
186 
187 	free_percpu(dp->stats_percpu);
188 	release_net(ovs_dp_get_net(dp));
189 	kfree(dp->ports);
190 	kfree(dp);
191 }
192 
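/* Hash vports by port number; DP_VPORT_HASH_BUCKETS is a power of two,
 * so the bitwise AND below is a cheap modulo.
 */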
193 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
194 					    u16 port_no)
195 {
196 	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
197 }
198 
199 /* Called with ovs_mutex or RCU read lock. */
200 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
201 {
202 	struct vport *vport;
203 	struct hlist_head *head;
204 
205 	head = vport_hash_bucket(dp, port_no);
206 	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
207 		if (vport->port_no == port_no)
208 			return vport;
209 	}
210 	return NULL;
211 }
212 
213 /* Called with ovs_mutex. */
214 static struct vport *new_vport(const struct vport_parms *parms)
215 {
216 	struct vport *vport;
217 
218 	vport = ovs_vport_add(parms);
219 	if (!IS_ERR(vport)) {
220 		struct datapath *dp = parms->dp;
221 		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
222 
223 		hlist_add_head_rcu(&vport->dp_hash_node, head);
224 	}
225 	return vport;
226 }
227 
228 void ovs_dp_detach_port(struct vport *p)
229 {
230 	ASSERT_OVSL();
231 
232 	/* First drop references to device. */
233 	hlist_del_rcu(&p->dp_hash_node);
234 
235 	/* Then destroy it. */
236 	ovs_vport_del(p);
237 }
238 
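/* The receive fast path: extract a flow key from the skb, look it up in
 * the flow table, and either execute the matched actions or upcall the
 * packet to userspace on a miss, bumping the per-CPU hit/miss counters
 * either way.
 */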
239 /* Must be called with rcu_read_lock. */
240 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
241 {
242 	struct datapath *dp = p->dp;
243 	struct sw_flow *flow;
244 	struct dp_stats_percpu *stats;
245 	struct sw_flow_key key;
246 	u64 *stats_counter;
247 	u32 n_mask_hit;
248 	int error;
249 
250 	stats = this_cpu_ptr(dp->stats_percpu);
251 
252 	/* Extract flow from 'skb' into 'key'. */
253 	error = ovs_flow_extract(skb, p->port_no, &key);
254 	if (unlikely(error)) {
255 		kfree_skb(skb);
256 		return;
257 	}
258 
259 	/* Look up flow. */
260 	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
261 	if (unlikely(!flow)) {
262 		struct dp_upcall_info upcall;
263 
264 		upcall.cmd = OVS_PACKET_CMD_MISS;
265 		upcall.key = &key;
266 		upcall.userdata = NULL;
267 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
268 		error = ovs_dp_upcall(dp, skb, &upcall);
269 		if (unlikely(error))
270 			kfree_skb(skb);
271 		else
272 			consume_skb(skb);
273 		stats_counter = &stats->n_missed;
274 		goto out;
275 	}
276 
277 	OVS_CB(skb)->flow = flow;
278 	OVS_CB(skb)->pkt_key = &key;
279 
280 	ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
281 	ovs_execute_actions(dp, skb);
282 	stats_counter = &stats->n_hit;
283 
284 out:
285 	/* Update datapath statistics. */
286 	u64_stats_update_begin(&stats->syncp);
287 	(*stats_counter)++;
288 	stats->n_mask_hit += n_mask_hit;
289 	u64_stats_update_end(&stats->syncp);
290 }
291 
292 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
293 		  const struct dp_upcall_info *upcall_info)
294 {
295 	struct dp_stats_percpu *stats;
296 	int err;
297 
298 	if (upcall_info->portid == 0) {
299 		err = -ENOTCONN;
300 		goto err;
301 	}
302 
303 	if (!skb_is_gso(skb))
304 		err = queue_userspace_packet(dp, skb, upcall_info);
305 	else
306 		err = queue_gso_packets(dp, skb, upcall_info);
307 	if (err)
308 		goto err;
309 
310 	return 0;
311 
312 err:
313 	stats = this_cpu_ptr(dp->stats_percpu);
314 
315 	u64_stats_update_begin(&stats->syncp);
316 	stats->n_lost++;
317 	u64_stats_update_end(&stats->syncp);
318 
319 	return err;
320 }
321 
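/* Upcalling a GSO skb means software-segmenting it and queueing every
 * resulting segment separately.  For UDP fragmentation the extracted key
 * describes only the first fragment, so all later segments are reported
 * with ip.frag = OVS_FRAG_TYPE_LATER.
 */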
322 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
323 			     const struct dp_upcall_info *upcall_info)
324 {
325 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
326 	struct dp_upcall_info later_info;
327 	struct sw_flow_key later_key;
328 	struct sk_buff *segs, *nskb;
329 	int err;
330 
331 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
332 	if (IS_ERR(segs))
333 		return PTR_ERR(segs);
334 
335 	/* Queue all of the segments. */
336 	skb = segs;
337 	do {
338 		err = queue_userspace_packet(dp, skb, upcall_info);
339 		if (err)
340 			break;
341 
342 		if (skb == segs && gso_type & SKB_GSO_UDP) {
343 			/* The initial flow key extracted by ovs_flow_extract()
344 			 * in this case is for the first fragment, so we need to
345 			 * properly mark later fragments.
346 			 */
347 			later_key = *upcall_info->key;
348 			later_key.ip.frag = OVS_FRAG_TYPE_LATER;
349 
350 			later_info = *upcall_info;
351 			later_info.key = &later_key;
352 			upcall_info = &later_info;
353 		}
354 	} while ((skb = skb->next));
355 
356 	/* Free all of the segments. */
357 	skb = segs;
358 	do {
359 		nskb = skb->next;
360 		if (err)
361 			kfree_skb(skb);
362 		else
363 			consume_skb(skb);
364 	} while ((skb = nskb));
365 	return err;
366 }
367 
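/* Worst-case netlink size of a flow key.  This estimate has to stay in
 * sync with the attributes that ovs_nla_put_flow() can emit; if it were
 * too small, upcall and reply skbs would be allocated short and message
 * construction would fail.
 */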
368 static size_t key_attr_size(void)
369 {
370 	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
371 		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
372 		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
373 		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
374 		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
375 		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
376 		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
377 		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
378 		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
379 		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
380 		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
381 		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
382 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
383 		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
384 		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
385 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
386 		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
387 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
388 		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
389 }
390 
391 static size_t upcall_msg_size(const struct nlattr *userdata,
392 			      unsigned int hdrlen)
393 {
394 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
395 		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
396 		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
397 
398 	/* OVS_PACKET_ATTR_USERDATA */
399 	if (userdata)
400 		size += NLA_ALIGN(userdata->nla_len);
401 
402 	return size;
403 }
404 
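/* Build one upcall message and unicast it to userspace.  The flow key and
 * optional userdata become ordinary netlink attributes; the packet payload
 * is attached with skb_zerocopy(), which copies only the linear head
 * (referencing the rest as fragments) when userspace announced
 * OVS_DP_F_UNALIGNED, and performs a full, padded linear copy otherwise.
 */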
405 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
406 				  const struct dp_upcall_info *upcall_info)
407 {
408 	struct ovs_header *upcall;
409 	struct sk_buff *nskb = NULL;
410 	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
411 	struct nlattr *nla;
412 	struct genl_info info = {
413 		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
414 		.snd_portid = upcall_info->portid,
415 	};
416 	size_t len;
417 	unsigned int hlen;
418 	int err, dp_ifindex;
419 
420 	dp_ifindex = get_dpifindex(dp);
421 	if (!dp_ifindex)
422 		return -ENODEV;
423 
424 	if (vlan_tx_tag_present(skb)) {
425 		nskb = skb_clone(skb, GFP_ATOMIC);
426 		if (!nskb)
427 			return -ENOMEM;
428 
429 		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
430 		if (!nskb)
431 			return -ENOMEM;
432 
433 		nskb->vlan_tci = 0;
434 		skb = nskb;
435 	}
436 
437 	if (nla_attr_size(skb->len) > USHRT_MAX) {
438 		err = -EFBIG;
439 		goto out;
440 	}
441 
442 	/* Complete checksum if needed */
443 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
444 	    (err = skb_checksum_help(skb)))
445 		goto out;
446 
447 	/* Older versions of OVS user space enforce alignment of the last
448 	 * Netlink attribute to NLA_ALIGNTO which would require extensive
449 	 * padding logic. Only perform zerocopy if padding is not required.
450 	 */
451 	if (dp->user_features & OVS_DP_F_UNALIGNED)
452 		hlen = skb_zerocopy_headlen(skb);
453 	else
454 		hlen = skb->len;
455 
456 	len = upcall_msg_size(upcall_info->userdata, hlen);
457 	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
458 	if (!user_skb) {
459 		err = -ENOMEM;
460 		goto out;
461 	}
462 
463 	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
464 			     0, upcall_info->cmd);
465 	upcall->dp_ifindex = dp_ifindex;
466 
467 	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
468 	err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
469 	BUG_ON(err);
470 	nla_nest_end(user_skb, nla);
471 
472 	if (upcall_info->userdata)
473 		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
474 			  nla_len(upcall_info->userdata),
475 			  nla_data(upcall_info->userdata));
476 
477 	/* Only reserve room for the attribute header; the packet data is
478 	 * added later in skb_zerocopy(). */
479 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
480 		err = -ENOBUFS;
481 		goto out;
482 	}
483 	nla->nla_len = nla_attr_size(skb->len);
484 
485 	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
486 	if (err)
487 		goto out;
488 
489 	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
490 	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
491 		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
492 
493 		if (plen > 0)
494 			memset(skb_put(user_skb, plen), 0, plen);
495 	}
496 
497 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
498 
499 	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
500 	user_skb = NULL;
501 out:
502 	if (err)
503 		skb_tx_error(skb);
504 	kfree_skb(user_skb);
505 	kfree_skb(nskb);
506 	return err;
507 }
508 
509 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
510 {
511 	struct ovs_header *ovs_header = info->userhdr;
512 	struct nlattr **a = info->attrs;
513 	struct sw_flow_actions *acts;
514 	struct sk_buff *packet;
515 	struct sw_flow *flow;
516 	struct datapath *dp;
517 	struct ethhdr *eth;
518 	int len;
519 	int err;
520 
521 	err = -EINVAL;
522 	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
523 	    !a[OVS_PACKET_ATTR_ACTIONS])
524 		goto err;
525 
526 	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
527 	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
528 	err = -ENOMEM;
529 	if (!packet)
530 		goto err;
531 	skb_reserve(packet, NET_IP_ALIGN);
532 
533 	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
534 
535 	skb_reset_mac_header(packet);
536 	eth = eth_hdr(packet);
537 
538 	/* Normally, setting the skb 'protocol' field would be handled by a
539 	 * call to eth_type_trans(), but it assumes there's a sending
540 	 * device, which we may not have. */
541 	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
542 		packet->protocol = eth->h_proto;
543 	else
544 		packet->protocol = htons(ETH_P_802_2);
545 
546 	/* Build an sw_flow for sending this packet. */
547 	flow = ovs_flow_alloc();
548 	err = PTR_ERR(flow);
549 	if (IS_ERR(flow))
550 		goto err_kfree_skb;
551 
552 	err = ovs_flow_extract(packet, -1, &flow->key);
553 	if (err)
554 		goto err_flow_free;
555 
556 	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
557 	if (err)
558 		goto err_flow_free;
559 	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
560 	err = PTR_ERR(acts);
561 	if (IS_ERR(acts))
562 		goto err_flow_free;
563 
564 	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
565 				   &flow->key, 0, &acts);
566 	rcu_assign_pointer(flow->sf_acts, acts);
567 	if (err)
568 		goto err_flow_free;
569 
570 	OVS_CB(packet)->flow = flow;
571 	OVS_CB(packet)->pkt_key = &flow->key;
572 	packet->priority = flow->key.phy.priority;
573 	packet->mark = flow->key.phy.skb_mark;
574 
575 	rcu_read_lock();
576 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
577 	err = -ENODEV;
578 	if (!dp)
579 		goto err_unlock;
580 
581 	local_bh_disable();
582 	err = ovs_execute_actions(dp, packet);
583 	local_bh_enable();
584 	rcu_read_unlock();
585 
586 	ovs_flow_free(flow, false);
587 	return err;
588 
589 err_unlock:
590 	rcu_read_unlock();
591 err_flow_free:
592 	ovs_flow_free(flow, false);
593 err_kfree_skb:
594 	kfree_skb(packet);
595 err:
596 	return err;
597 }
598 
599 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
600 	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
601 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
602 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
603 };
604 
605 static const struct genl_ops dp_packet_genl_ops[] = {
606 	{ .cmd = OVS_PACKET_CMD_EXECUTE,
607 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
608 	  .policy = packet_policy,
609 	  .doit = ovs_packet_cmd_execute
610 	}
611 };
612 
613 static struct genl_family dp_packet_genl_family = {
614 	.id = GENL_ID_GENERATE,
615 	.hdrsize = sizeof(struct ovs_header),
616 	.name = OVS_PACKET_FAMILY,
617 	.version = OVS_PACKET_VERSION,
618 	.maxattr = OVS_PACKET_ATTR_MAX,
619 	.netnsok = true,
620 	.parallel_ops = true,
621 	.ops = dp_packet_genl_ops,
622 	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
623 };
624 
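/* Fold the per-CPU datapath counters into a single snapshot.  Each CPU's
 * stats are read under a u64_stats fetch/retry loop so that the 64-bit
 * counters stay consistent even on 32-bit hosts.
 */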
625 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
626 			 struct ovs_dp_megaflow_stats *mega_stats)
627 {
628 	int i;
629 
630 	memset(mega_stats, 0, sizeof(*mega_stats));
631 
632 	stats->n_flows = ovs_flow_tbl_count(&dp->table);
633 	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
634 
635 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
636 
637 	for_each_possible_cpu(i) {
638 		const struct dp_stats_percpu *percpu_stats;
639 		struct dp_stats_percpu local_stats;
640 		unsigned int start;
641 
642 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
643 
644 		do {
645 			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
646 			local_stats = *percpu_stats;
647 		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
648 
649 		stats->n_hit += local_stats.n_hit;
650 		stats->n_missed += local_stats.n_missed;
651 		stats->n_lost += local_stats.n_lost;
652 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
653 	}
654 }
655 
656 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
657 {
658 	return NLMSG_ALIGN(sizeof(struct ovs_header))
659 		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
660 		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
661 		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
662 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
663 		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
664 		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
665 }
666 
667 /* Called with ovs_mutex or RCU read lock. */
668 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
669 				  struct sk_buff *skb, u32 portid,
670 				  u32 seq, u32 flags, u8 cmd)
671 {
672 	const int skb_orig_len = skb->len;
673 	struct nlattr *start;
674 	struct ovs_flow_stats stats;
675 	__be16 tcp_flags;
676 	unsigned long used;
677 	struct ovs_header *ovs_header;
678 	struct nlattr *nla;
679 	int err;
680 
681 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
682 	if (!ovs_header)
683 		return -EMSGSIZE;
684 
685 	ovs_header->dp_ifindex = dp_ifindex;
686 
687 	/* Fill flow key. */
688 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
689 	if (!nla)
690 		goto nla_put_failure;
691 
692 	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
693 	if (err)
694 		goto error;
695 	nla_nest_end(skb, nla);
696 
697 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
698 	if (!nla)
699 		goto nla_put_failure;
700 
701 	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
702 	if (err)
703 		goto error;
704 
705 	nla_nest_end(skb, nla);
706 
707 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
708 
709 	if (used &&
710 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
711 		goto nla_put_failure;
712 
713 	if (stats.n_packets &&
714 	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
715 		goto nla_put_failure;
716 
717 	if ((u8)ntohs(tcp_flags) &&
718 	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
719 		goto nla_put_failure;
720 
721 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
722 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
723 	 * Netlink but individual action lists can be longer than
724 	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
725 	 * The userspace caller can always fetch the actions separately if it
726 	 * really wants them.  (Most userspace callers in fact don't care.)
727 	 *
728 	 * This can only fail for dump operations because the skb is always
729 	 * properly sized for single flows.
730 	 */
731 	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
732 	if (start) {
733 		const struct sw_flow_actions *sf_acts;
734 
735 		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
736 		err = ovs_nla_put_actions(sf_acts->actions,
737 					  sf_acts->actions_len, skb);
738 
739 		if (!err)
740 			nla_nest_end(skb, start);
741 		else {
742 			if (skb_orig_len)
743 				goto error;
744 
745 			nla_nest_cancel(skb, start);
746 		}
747 	} else if (skb_orig_len)
748 		goto nla_put_failure;
749 
750 	return genlmsg_end(skb, ovs_header);
751 
752 nla_put_failure:
753 	err = -EMSGSIZE;
754 error:
755 	genlmsg_cancel(skb, ovs_header);
756 	return err;
757 }
758 
759 /* May not be called with RCU read lock. */
760 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
761 					       struct genl_info *info,
762 					       bool always)
763 {
764 	struct sk_buff *skb;
765 
766 	if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
767 		return NULL;
768 
769 	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
770 	if (!skb)
771 		return ERR_PTR(-ENOMEM);
772 
773 	return skb;
774 }
775 
776 /* Called with ovs_mutex. */
777 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
778 					       int dp_ifindex,
779 					       struct genl_info *info, u8 cmd,
780 					       bool always)
781 {
782 	struct sk_buff *skb;
783 	int retval;
784 
785 	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
786 				      always);
787 	if (IS_ERR_OR_NULL(skb))
788 		return skb;
789 
790 	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
791 					info->snd_portid, info->snd_seq, 0,
792 					cmd);
793 	BUG_ON(retval < 0);
794 	return skb;
795 }
796 
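/* Flow creation is optimistic: the flow, its actions and the (optional)
 * reply skb are all allocated before ovs_lock is taken, so the locked
 * region is reduced to the table lookup plus either an insert or an
 * in-place swap of the actions when the flow already exists.
 */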
797 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
798 {
799 	struct nlattr **a = info->attrs;
800 	struct ovs_header *ovs_header = info->userhdr;
801 	struct sw_flow *flow, *new_flow;
802 	struct sw_flow_mask mask;
803 	struct sk_buff *reply;
804 	struct datapath *dp;
805 	struct sw_flow_actions *acts;
806 	struct sw_flow_match match;
807 	int error;
808 
809 	/* Must have key and actions. */
810 	error = -EINVAL;
811 	if (!a[OVS_FLOW_ATTR_KEY])
812 		goto error;
813 	if (!a[OVS_FLOW_ATTR_ACTIONS])
814 		goto error;
815 
816 	/* Most of the time we need to allocate a new flow, do it before
817 	 * locking.
818 	 */
819 	new_flow = ovs_flow_alloc();
820 	if (IS_ERR(new_flow)) {
821 		error = PTR_ERR(new_flow);
822 		goto error;
823 	}
824 
825 	/* Extract key. */
826 	ovs_match_init(&match, &new_flow->unmasked_key, &mask);
827 	error = ovs_nla_get_match(&match,
828 				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
829 	if (error)
830 		goto err_kfree_flow;
831 
832 	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
833 
834 	/* Validate actions. */
835 	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
836 	error = PTR_ERR(acts);
837 	if (IS_ERR(acts))
838 		goto err_kfree_flow;
839 
840 	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
841 				     0, &acts);
842 	if (error) {
843 		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
844 		goto err_kfree_acts;
845 	}
846 
847 	reply = ovs_flow_cmd_alloc_info(acts, info, false);
848 	if (IS_ERR(reply)) {
849 		error = PTR_ERR(reply);
850 		goto err_kfree_acts;
851 	}
852 
853 	ovs_lock();
854 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
855 	if (unlikely(!dp)) {
856 		error = -ENODEV;
857 		goto err_unlock_ovs;
858 	}
859 	/* Check if this is a duplicate flow */
860 	flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
861 	if (likely(!flow)) {
862 		rcu_assign_pointer(new_flow->sf_acts, acts);
863 
864 		/* Put flow in bucket. */
865 		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
866 		if (unlikely(error)) {
867 			acts = NULL;
868 			goto err_unlock_ovs;
869 		}
870 
871 		if (unlikely(reply)) {
872 			error = ovs_flow_cmd_fill_info(new_flow,
873 						       ovs_header->dp_ifindex,
874 						       reply, info->snd_portid,
875 						       info->snd_seq, 0,
876 						       OVS_FLOW_CMD_NEW);
877 			BUG_ON(error < 0);
878 		}
879 		ovs_unlock();
880 	} else {
881 		struct sw_flow_actions *old_acts;
882 
883 		/* Bail out if we're not allowed to modify an existing flow.
884 		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
885 		 * because Generic Netlink treats the latter as a dump
886 		 * request.  We also accept NLM_F_EXCL in case that bug ever
887 		 * gets fixed.
888 		 */
889 		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
890 							 | NLM_F_EXCL))) {
891 			error = -EEXIST;
892 			goto err_unlock_ovs;
893 		}
894 		/* The unmasked key has to be the same for flow updates. */
895 		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
896 			flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
897 			if (!flow) {
898 				error = -ENOENT;
899 				goto err_unlock_ovs;
900 			}
901 		}
902 		/* Update actions. */
903 		old_acts = ovsl_dereference(flow->sf_acts);
904 		rcu_assign_pointer(flow->sf_acts, acts);
905 
906 		if (unlikely(reply)) {
907 			error = ovs_flow_cmd_fill_info(flow,
908 						       ovs_header->dp_ifindex,
909 						       reply, info->snd_portid,
910 						       info->snd_seq, 0,
911 						       OVS_FLOW_CMD_NEW);
912 			BUG_ON(error < 0);
913 		}
914 		ovs_unlock();
915 
916 		ovs_nla_free_flow_actions(old_acts);
917 		ovs_flow_free(new_flow, false);
918 	}
919 
920 	if (reply)
921 		ovs_notify(&dp_flow_genl_family, reply, info);
922 	return 0;
923 
924 err_unlock_ovs:
925 	ovs_unlock();
926 	kfree_skb(reply);
927 err_kfree_acts:
928 	kfree(acts);
929 err_kfree_flow:
930 	ovs_flow_free(new_flow, false);
931 error:
932 	return error;
933 }
934 
935 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
936 {
937 	struct nlattr **a = info->attrs;
938 	struct ovs_header *ovs_header = info->userhdr;
939 	struct sw_flow_key key, masked_key;
940 	struct sw_flow *flow;
941 	struct sw_flow_mask mask;
942 	struct sk_buff *reply = NULL;
943 	struct datapath *dp;
944 	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
945 	struct sw_flow_match match;
946 	int error;
947 
948 	/* Extract key. */
949 	error = -EINVAL;
950 	if (!a[OVS_FLOW_ATTR_KEY])
951 		goto error;
952 
953 	ovs_match_init(&match, &key, &mask);
954 	error = ovs_nla_get_match(&match,
955 				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
956 	if (error)
957 		goto error;
958 
959 	/* Validate actions. */
960 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
961 		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
962 		error = PTR_ERR(acts);
963 		if (IS_ERR(acts))
964 			goto error;
965 
966 		ovs_flow_mask_key(&masked_key, &key, &mask);
967 		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
968 					     &masked_key, 0, &acts);
969 		if (error) {
970 			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
971 			goto err_kfree_acts;
972 		}
973 	}
974 
975 	/* Can allocate before locking if have acts. */
976 	if (acts) {
977 		reply = ovs_flow_cmd_alloc_info(acts, info, false);
978 		if (IS_ERR(reply)) {
979 			error = PTR_ERR(reply);
980 			goto err_kfree_acts;
981 		}
982 	}
983 
984 	ovs_lock();
985 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
986 	if (unlikely(!dp)) {
987 		error = -ENODEV;
988 		goto err_unlock_ovs;
989 	}
990 	/* Check that the flow exists. */
991 	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
992 	if (unlikely(!flow)) {
993 		error = -ENOENT;
994 		goto err_unlock_ovs;
995 	}
996 
997 	/* Update actions, if present. */
998 	if (likely(acts)) {
999 		old_acts = ovsl_dereference(flow->sf_acts);
1000 		rcu_assign_pointer(flow->sf_acts, acts);
1001 
1002 		if (unlikely(reply)) {
1003 			error = ovs_flow_cmd_fill_info(flow,
1004 						       ovs_header->dp_ifindex,
1005 						       reply, info->snd_portid,
1006 						       info->snd_seq, 0,
1007 						       OVS_FLOW_CMD_NEW);
1008 			BUG_ON(error < 0);
1009 		}
1010 	} else {
1011 		/* The reply could not be allocated before locking without acts; build it now. */
1012 		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1013 						info, OVS_FLOW_CMD_NEW, false);
1014 		if (unlikely(IS_ERR(reply))) {
1015 			error = PTR_ERR(reply);
1016 			goto err_unlock_ovs;
1017 		}
1018 	}
1019 
1020 	/* Clear stats. */
1021 	if (a[OVS_FLOW_ATTR_CLEAR])
1022 		ovs_flow_stats_clear(flow);
1023 	ovs_unlock();
1024 
1025 	if (reply)
1026 		ovs_notify(&dp_flow_genl_family, reply, info);
1027 	if (old_acts)
1028 		ovs_nla_free_flow_actions(old_acts);
1029 
1030 	return 0;
1031 
1032 err_unlock_ovs:
1033 	ovs_unlock();
1034 	kfree_skb(reply);
1035 err_kfree_acts:
1036 	kfree(acts);
1037 error:
1038 	return error;
1039 }
1040 
1041 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1042 {
1043 	struct nlattr **a = info->attrs;
1044 	struct ovs_header *ovs_header = info->userhdr;
1045 	struct sw_flow_key key;
1046 	struct sk_buff *reply;
1047 	struct sw_flow *flow;
1048 	struct datapath *dp;
1049 	struct sw_flow_match match;
1050 	int err;
1051 
1052 	if (!a[OVS_FLOW_ATTR_KEY]) {
1053 		OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
1054 		return -EINVAL;
1055 	}
1056 
1057 	ovs_match_init(&match, &key, NULL);
1058 	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1059 	if (err)
1060 		return err;
1061 
1062 	ovs_lock();
1063 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1064 	if (!dp) {
1065 		err = -ENODEV;
1066 		goto unlock;
1067 	}
1068 
1069 	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1070 	if (!flow) {
1071 		err = -ENOENT;
1072 		goto unlock;
1073 	}
1074 
1075 	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1076 					OVS_FLOW_CMD_NEW, true);
1077 	if (IS_ERR(reply)) {
1078 		err = PTR_ERR(reply);
1079 		goto unlock;
1080 	}
1081 
1082 	ovs_unlock();
1083 	return genlmsg_reply(reply, info);
1084 unlock:
1085 	ovs_unlock();
1086 	return err;
1087 }
1088 
1089 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1090 {
1091 	struct nlattr **a = info->attrs;
1092 	struct ovs_header *ovs_header = info->userhdr;
1093 	struct sw_flow_key key;
1094 	struct sk_buff *reply;
1095 	struct sw_flow *flow;
1096 	struct datapath *dp;
1097 	struct sw_flow_match match;
1098 	int err;
1099 
1100 	if (likely(a[OVS_FLOW_ATTR_KEY])) {
1101 		ovs_match_init(&match, &key, NULL);
1102 		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1103 		if (unlikely(err))
1104 			return err;
1105 	}
1106 
1107 	ovs_lock();
1108 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1109 	if (unlikely(!dp)) {
1110 		err = -ENODEV;
1111 		goto unlock;
1112 	}
1113 
1114 	if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
1115 		err = ovs_flow_tbl_flush(&dp->table);
1116 		goto unlock;
1117 	}
1118 
1119 	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1120 	if (unlikely(!flow)) {
1121 		err = -ENOENT;
1122 		goto unlock;
1123 	}
1124 
1125 	ovs_flow_tbl_remove(&dp->table, flow);
1126 	ovs_unlock();
1127 
1128 	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1129 					info, false);
1130 	if (likely(reply)) {
1131 		if (likely(!IS_ERR(reply))) {
1132 			rcu_read_lock();	/* To keep the RCU checker happy. */
1133 			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1134 						     reply, info->snd_portid,
1135 						     info->snd_seq, 0,
1136 						     OVS_FLOW_CMD_DEL);
1137 			rcu_read_unlock();
1138 			BUG_ON(err < 0);
1139 
1140 			ovs_notify(&dp_flow_genl_family, reply, info);
1141 		} else {
1142 			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1143 		}
1144 	}
1145 
1146 	ovs_flow_free(flow, true);
1147 	return 0;
1148 unlock:
1149 	ovs_unlock();
1150 	return err;
1151 }
1152 
1153 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1154 {
1155 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1156 	struct table_instance *ti;
1157 	struct datapath *dp;
1158 
1159 	rcu_read_lock();
1160 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1161 	if (!dp) {
1162 		rcu_read_unlock();
1163 		return -ENODEV;
1164 	}
1165 
1166 	ti = rcu_dereference(dp->table.ti);
1167 	for (;;) {
1168 		struct sw_flow *flow;
1169 		u32 bucket, obj;
1170 
1171 		bucket = cb->args[0];
1172 		obj = cb->args[1];
1173 		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1174 		if (!flow)
1175 			break;
1176 
1177 		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1178 					   NETLINK_CB(cb->skb).portid,
1179 					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
1180 					   OVS_FLOW_CMD_NEW) < 0)
1181 			break;
1182 
1183 		cb->args[0] = bucket;
1184 		cb->args[1] = obj;
1185 	}
1186 	rcu_read_unlock();
1187 	return skb->len;
1188 }
1189 
1190 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1191 	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1192 	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1193 	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1194 };
1195 
1196 static const struct genl_ops dp_flow_genl_ops[] = {
1197 	{ .cmd = OVS_FLOW_CMD_NEW,
1198 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1199 	  .policy = flow_policy,
1200 	  .doit = ovs_flow_cmd_new
1201 	},
1202 	{ .cmd = OVS_FLOW_CMD_DEL,
1203 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1204 	  .policy = flow_policy,
1205 	  .doit = ovs_flow_cmd_del
1206 	},
1207 	{ .cmd = OVS_FLOW_CMD_GET,
1208 	  .flags = 0,		    /* OK for unprivileged users. */
1209 	  .policy = flow_policy,
1210 	  .doit = ovs_flow_cmd_get,
1211 	  .dumpit = ovs_flow_cmd_dump
1212 	},
1213 	{ .cmd = OVS_FLOW_CMD_SET,
1214 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1215 	  .policy = flow_policy,
1216 	  .doit = ovs_flow_cmd_set,
1217 	},
1218 };
1219 
1220 static struct genl_family dp_flow_genl_family = {
1221 	.id = GENL_ID_GENERATE,
1222 	.hdrsize = sizeof(struct ovs_header),
1223 	.name = OVS_FLOW_FAMILY,
1224 	.version = OVS_FLOW_VERSION,
1225 	.maxattr = OVS_FLOW_ATTR_MAX,
1226 	.netnsok = true,
1227 	.parallel_ops = true,
1228 	.ops = dp_flow_genl_ops,
1229 	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1230 	.mcgrps = &ovs_dp_flow_multicast_group,
1231 	.n_mcgrps = 1,
1232 };
1233 
1234 static size_t ovs_dp_cmd_msg_size(void)
1235 {
1236 	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1237 
1238 	msgsize += nla_total_size(IFNAMSIZ);
1239 	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1240 	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1241 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1242 
1243 	return msgsize;
1244 }
1245 
1246 /* Called with ovs_mutex or RCU read lock. */
1247 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1248 				u32 portid, u32 seq, u32 flags, u8 cmd)
1249 {
1250 	struct ovs_header *ovs_header;
1251 	struct ovs_dp_stats dp_stats;
1252 	struct ovs_dp_megaflow_stats dp_megaflow_stats;
1253 	int err;
1254 
1255 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1256 				   flags, cmd);
1257 	if (!ovs_header)
1258 		goto error;
1259 
1260 	ovs_header->dp_ifindex = get_dpifindex(dp);
1261 
1262 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1263 	if (err)
1264 		goto nla_put_failure;
1265 
1266 	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1267 	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1268 			&dp_stats))
1269 		goto nla_put_failure;
1270 
1271 	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1272 			sizeof(struct ovs_dp_megaflow_stats),
1273 			&dp_megaflow_stats))
1274 		goto nla_put_failure;
1275 
1276 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1277 		goto nla_put_failure;
1278 
1279 	return genlmsg_end(skb, ovs_header);
1280 
1281 nla_put_failure:
1282 	genlmsg_cancel(skb, ovs_header);
1283 error:
1284 	return -EMSGSIZE;
1285 }
1286 
1287 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1288 {
1289 	return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1290 }
1291 
1292 /* Called with rcu_read_lock or ovs_mutex. */
1293 static struct datapath *lookup_datapath(struct net *net,
1294 					struct ovs_header *ovs_header,
1295 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1296 {
1297 	struct datapath *dp;
1298 
1299 	if (!a[OVS_DP_ATTR_NAME])
1300 		dp = get_dp(net, ovs_header->dp_ifindex);
1301 	else {
1302 		struct vport *vport;
1303 
1304 		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1305 		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1306 	}
1307 	return dp ? dp : ERR_PTR(-ENODEV);
1308 }
1309 
1310 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1311 {
1312 	struct datapath *dp;
1313 
1314 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1315 	if (IS_ERR(dp))
1316 		return;
1317 
1318 	WARN(dp->user_features, "Dropping previously announced user features\n");
1319 	dp->user_features = 0;
1320 }
1321 
1322 static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
1323 {
1324 	if (a[OVS_DP_ATTR_USER_FEATURES])
1325 		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1326 }
1327 
1328 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1329 {
1330 	struct nlattr **a = info->attrs;
1331 	struct vport_parms parms;
1332 	struct sk_buff *reply;
1333 	struct datapath *dp;
1334 	struct vport *vport;
1335 	struct ovs_net *ovs_net;
1336 	int err, i;
1337 
1338 	err = -EINVAL;
1339 	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1340 		goto err;
1341 
1342 	reply = ovs_dp_cmd_alloc_info(info);
1343 	if (!reply)
1344 		return -ENOMEM;
1345 
1346 	err = -ENOMEM;
1347 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1348 	if (dp == NULL)
1349 		goto err_free_reply;
1350 
1351 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1352 
1353 	/* Allocate table. */
1354 	err = ovs_flow_tbl_init(&dp->table);
1355 	if (err)
1356 		goto err_free_dp;
1357 
1358 	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1359 	if (!dp->stats_percpu) {
1360 		err = -ENOMEM;
1361 		goto err_destroy_table;
1362 	}
1363 
1364 	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1365 			    GFP_KERNEL);
1366 	if (!dp->ports) {
1367 		err = -ENOMEM;
1368 		goto err_destroy_percpu;
1369 	}
1370 
1371 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1372 		INIT_HLIST_HEAD(&dp->ports[i]);
1373 
1374 	/* Set up our datapath device. */
1375 	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1376 	parms.type = OVS_VPORT_TYPE_INTERNAL;
1377 	parms.options = NULL;
1378 	parms.dp = dp;
1379 	parms.port_no = OVSP_LOCAL;
1380 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1381 
1382 	ovs_dp_change(dp, a);
1383 
1384 	/* So far only local changes have been made; now we need the lock. */
1385 	ovs_lock();
1386 
1387 	vport = new_vport(&parms);
1388 	if (IS_ERR(vport)) {
1389 		err = PTR_ERR(vport);
1390 		if (err == -EBUSY)
1391 			err = -EEXIST;
1392 
1393 		if (err == -EEXIST) {
1394 			/* An outdated user space instance that does not understand
1395 			 * the concept of user_features has attempted to create a new
1396 			 * datapath and is likely to reuse it. Drop all user features.
1397 			 */
1398 			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1399 				ovs_dp_reset_user_features(skb, info);
1400 		}
1401 
1402 		goto err_destroy_ports_array;
1403 	}
1404 
1405 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1406 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1407 	BUG_ON(err < 0);
1408 
1409 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1410 	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1411 
1412 	ovs_unlock();
1413 
1414 	ovs_notify(&dp_datapath_genl_family, reply, info);
1415 	return 0;
1416 
1417 err_destroy_ports_array:
1418 	ovs_unlock();
1419 	kfree(dp->ports);
1420 err_destroy_percpu:
1421 	free_percpu(dp->stats_percpu);
1422 err_destroy_table:
1423 	ovs_flow_tbl_destroy(&dp->table, false);
1424 err_free_dp:
1425 	release_net(ovs_dp_get_net(dp));
1426 	kfree(dp);
1427 err_free_reply:
1428 	kfree_skb(reply);
1429 err:
1430 	return err;
1431 }
1432 
1433 /* Called with ovs_mutex. */
1434 static void __dp_destroy(struct datapath *dp)
1435 {
1436 	int i;
1437 
1438 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1439 		struct vport *vport;
1440 		struct hlist_node *n;
1441 
1442 		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1443 			if (vport->port_no != OVSP_LOCAL)
1444 				ovs_dp_detach_port(vport);
1445 	}
1446 
1447 	list_del_rcu(&dp->list_node);
1448 
1449 	/* OVSP_LOCAL is the datapath internal port.  All other ports have
1450 	 * been detached above; remove it last, before freeing the datapath.
1451 	 */
1452 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1453 
1454 	/* RCU destroy the flow table */
1455 	ovs_flow_tbl_destroy(&dp->table, true);
1456 
1457 	call_rcu(&dp->rcu, destroy_dp_rcu);
1458 }
1459 
1460 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1461 {
1462 	struct sk_buff *reply;
1463 	struct datapath *dp;
1464 	int err;
1465 
1466 	reply = ovs_dp_cmd_alloc_info(info);
1467 	if (!reply)
1468 		return -ENOMEM;
1469 
1470 	ovs_lock();
1471 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1472 	err = PTR_ERR(dp);
1473 	if (IS_ERR(dp))
1474 		goto err_unlock_free;
1475 
1476 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1477 				   info->snd_seq, 0, OVS_DP_CMD_DEL);
1478 	BUG_ON(err < 0);
1479 
1480 	__dp_destroy(dp);
1481 	ovs_unlock();
1482 
1483 	ovs_notify(&dp_datapath_genl_family, reply, info);
1484 
1485 	return 0;
1486 
1487 err_unlock_free:
1488 	ovs_unlock();
1489 	kfree_skb(reply);
1490 	return err;
1491 }
1492 
1493 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1494 {
1495 	struct sk_buff *reply;
1496 	struct datapath *dp;
1497 	int err;
1498 
1499 	reply = ovs_dp_cmd_alloc_info(info);
1500 	if (!reply)
1501 		return -ENOMEM;
1502 
1503 	ovs_lock();
1504 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1505 	err = PTR_ERR(dp);
1506 	if (IS_ERR(dp))
1507 		goto err_unlock_free;
1508 
1509 	ovs_dp_change(dp, info->attrs);
1510 
1511 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1512 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1513 	BUG_ON(err < 0);
1514 
1515 	ovs_unlock();
1516 	ovs_notify(&dp_datapath_genl_family, reply, info);
1517 
1518 	return 0;
1519 
1520 err_unlock_free:
1521 	ovs_unlock();
1522 	kfree_skb(reply);
1523 	return err;
1524 }
1525 
1526 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1527 {
1528 	struct sk_buff *reply;
1529 	struct datapath *dp;
1530 	int err;
1531 
1532 	reply = ovs_dp_cmd_alloc_info(info);
1533 	if (!reply)
1534 		return -ENOMEM;
1535 
1536 	rcu_read_lock();
1537 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1538 	if (IS_ERR(dp)) {
1539 		err = PTR_ERR(dp);
1540 		goto err_unlock_free;
1541 	}
1542 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1543 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1544 	BUG_ON(err < 0);
1545 	rcu_read_unlock();
1546 
1547 	return genlmsg_reply(reply, info);
1548 
1549 err_unlock_free:
1550 	rcu_read_unlock();
1551 	kfree_skb(reply);
1552 	return err;
1553 }
1554 
1555 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1556 {
1557 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1558 	struct datapath *dp;
1559 	int skip = cb->args[0];
1560 	int i = 0;
1561 
1562 	rcu_read_lock();
1563 	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1564 		if (i >= skip &&
1565 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1566 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1567 					 OVS_DP_CMD_NEW) < 0)
1568 			break;
1569 		i++;
1570 	}
1571 	rcu_read_unlock();
1572 
1573 	cb->args[0] = i;
1574 
1575 	return skb->len;
1576 }
1577 
1578 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1579 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1580 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1581 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1582 };
1583 
1584 static const struct genl_ops dp_datapath_genl_ops[] = {
1585 	{ .cmd = OVS_DP_CMD_NEW,
1586 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1587 	  .policy = datapath_policy,
1588 	  .doit = ovs_dp_cmd_new
1589 	},
1590 	{ .cmd = OVS_DP_CMD_DEL,
1591 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1592 	  .policy = datapath_policy,
1593 	  .doit = ovs_dp_cmd_del
1594 	},
1595 	{ .cmd = OVS_DP_CMD_GET,
1596 	  .flags = 0,		    /* OK for unprivileged users. */
1597 	  .policy = datapath_policy,
1598 	  .doit = ovs_dp_cmd_get,
1599 	  .dumpit = ovs_dp_cmd_dump
1600 	},
1601 	{ .cmd = OVS_DP_CMD_SET,
1602 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1603 	  .policy = datapath_policy,
1604 	  .doit = ovs_dp_cmd_set,
1605 	},
1606 };
1607 
1608 static struct genl_family dp_datapath_genl_family = {
1609 	.id = GENL_ID_GENERATE,
1610 	.hdrsize = sizeof(struct ovs_header),
1611 	.name = OVS_DATAPATH_FAMILY,
1612 	.version = OVS_DATAPATH_VERSION,
1613 	.maxattr = OVS_DP_ATTR_MAX,
1614 	.netnsok = true,
1615 	.parallel_ops = true,
1616 	.ops = dp_datapath_genl_ops,
1617 	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1618 	.mcgrps = &ovs_dp_datapath_multicast_group,
1619 	.n_mcgrps = 1,
1620 };
1621 
1622 /* Called with ovs_mutex or RCU read lock. */
1623 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1624 				   u32 portid, u32 seq, u32 flags, u8 cmd)
1625 {
1626 	struct ovs_header *ovs_header;
1627 	struct ovs_vport_stats vport_stats;
1628 	int err;
1629 
1630 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1631 				 flags, cmd);
1632 	if (!ovs_header)
1633 		return -EMSGSIZE;
1634 
1635 	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1636 
1637 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1638 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1639 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1640 			   vport->ops->get_name(vport)))
1641 		goto nla_put_failure;
1642 
1643 	ovs_vport_get_stats(vport, &vport_stats);
1644 	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1645 		    &vport_stats))
1646 		goto nla_put_failure;
1647 
1648 	if (ovs_vport_get_upcall_portids(vport, skb))
1649 		goto nla_put_failure;
1650 
1651 	err = ovs_vport_get_options(vport, skb);
1652 	if (err == -EMSGSIZE)
1653 		goto error;
1654 
1655 	return genlmsg_end(skb, ovs_header);
1656 
1657 nla_put_failure:
1658 	err = -EMSGSIZE;
1659 error:
1660 	genlmsg_cancel(skb, ovs_header);
1661 	return err;
1662 }
1663 
1664 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1665 {
1666 	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1667 }
1668 
1669 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1670 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1671 					 u32 seq, u8 cmd)
1672 {
1673 	struct sk_buff *skb;
1674 	int retval;
1675 
1676 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1677 	if (!skb)
1678 		return ERR_PTR(-ENOMEM);
1679 
1680 	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1681 	BUG_ON(retval < 0);
1682 
1683 	return skb;
1684 }
1685 
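/* A vport can be addressed two ways: by OVS_VPORT_ATTR_NAME, which is
 * looked up globally and then checked against dp_ifindex, or by
 * OVS_VPORT_ATTR_PORT_NO within the datapath that dp_ifindex names.
 */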
1686 /* Called with ovs_mutex or RCU read lock. */
1687 static struct vport *lookup_vport(struct net *net,
1688 				  struct ovs_header *ovs_header,
1689 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1690 {
1691 	struct datapath *dp;
1692 	struct vport *vport;
1693 
1694 	if (a[OVS_VPORT_ATTR_NAME]) {
1695 		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1696 		if (!vport)
1697 			return ERR_PTR(-ENODEV);
1698 		if (ovs_header->dp_ifindex &&
1699 		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1700 			return ERR_PTR(-ENODEV);
1701 		return vport;
1702 	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1703 		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1704 
1705 		if (port_no >= DP_MAX_PORTS)
1706 			return ERR_PTR(-EFBIG);
1707 
1708 		dp = get_dp(net, ovs_header->dp_ifindex);
1709 		if (!dp)
1710 			return ERR_PTR(-ENODEV);
1711 
1712 		vport = ovs_vport_ovsl_rcu(dp, port_no);
1713 		if (!vport)
1714 			return ERR_PTR(-ENODEV);
1715 		return vport;
1716 	} else
1717 		return ERR_PTR(-EINVAL);
1718 }
1719 
1720 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1721 {
1722 	struct nlattr **a = info->attrs;
1723 	struct ovs_header *ovs_header = info->userhdr;
1724 	struct vport_parms parms;
1725 	struct sk_buff *reply;
1726 	struct vport *vport;
1727 	struct datapath *dp;
1728 	u32 port_no;
1729 	int err;
1730 
1731 	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1732 	    !a[OVS_VPORT_ATTR_UPCALL_PID])
1733 		return -EINVAL;
1734 
1735 	port_no = a[OVS_VPORT_ATTR_PORT_NO]
1736 		? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1737 	if (port_no >= DP_MAX_PORTS)
1738 		return -EFBIG;
1739 
1740 	reply = ovs_vport_cmd_alloc_info();
1741 	if (!reply)
1742 		return -ENOMEM;
1743 
1744 	ovs_lock();
1745 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1746 	err = -ENODEV;
1747 	if (!dp)
1748 		goto exit_unlock_free;
1749 
1750 	if (port_no) {
1751 		vport = ovs_vport_ovsl(dp, port_no);
1752 		err = -EBUSY;
1753 		if (vport)
1754 			goto exit_unlock_free;
1755 	} else {
1756 		for (port_no = 1; ; port_no++) {
1757 			if (port_no >= DP_MAX_PORTS) {
1758 				err = -EFBIG;
1759 				goto exit_unlock_free;
1760 			}
1761 			vport = ovs_vport_ovsl(dp, port_no);
1762 			if (!vport)
1763 				break;
1764 		}
1765 	}
1766 
1767 	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1768 	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1769 	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1770 	parms.dp = dp;
1771 	parms.port_no = port_no;
1772 	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1773 
1774 	vport = new_vport(&parms);
1775 	err = PTR_ERR(vport);
1776 	if (IS_ERR(vport))
1777 		goto exit_unlock_free;
1778 
1779 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1780 				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1781 	BUG_ON(err < 0);
1782 	ovs_unlock();
1783 
1784 	ovs_notify(&dp_vport_genl_family, reply, info);
1785 	return 0;
1786 
1787 exit_unlock_free:
1788 	ovs_unlock();
1789 	kfree_skb(reply);
1790 	return err;
1791 }
1792 
1793 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1794 {
1795 	struct nlattr **a = info->attrs;
1796 	struct sk_buff *reply;
1797 	struct vport *vport;
1798 	int err;
1799 
1800 	reply = ovs_vport_cmd_alloc_info();
1801 	if (!reply)
1802 		return -ENOMEM;
1803 
1804 	ovs_lock();
1805 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1806 	err = PTR_ERR(vport);
1807 	if (IS_ERR(vport))
1808 		goto exit_unlock_free;
1809 
1810 	if (a[OVS_VPORT_ATTR_TYPE] &&
1811 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1812 		err = -EINVAL;
1813 		goto exit_unlock_free;
1814 	}
1815 
1816 	if (a[OVS_VPORT_ATTR_OPTIONS]) {
1817 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1818 		if (err)
1819 			goto exit_unlock_free;
1820 	}
1821 
1823 	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1824 		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
1825 
1826 		err = ovs_vport_set_upcall_portids(vport, ids);
1827 		if (err)
1828 			goto exit_unlock_free;
1829 	}
1830 
1831 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1832 				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1833 	BUG_ON(err < 0);
1834 
1835 	ovs_unlock();
1836 	ovs_notify(&dp_vport_genl_family, reply, info);
1837 	return 0;
1838 
1839 exit_unlock_free:
1840 	ovs_unlock();
1841 	kfree_skb(reply);
1842 	return err;
1843 }
1844 
1845 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1846 {
1847 	struct nlattr **a = info->attrs;
1848 	struct sk_buff *reply;
1849 	struct vport *vport;
1850 	int err;
1851 
1852 	reply = ovs_vport_cmd_alloc_info();
1853 	if (!reply)
1854 		return -ENOMEM;
1855 
1856 	ovs_lock();
1857 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1858 	err = PTR_ERR(vport);
1859 	if (IS_ERR(vport))
1860 		goto exit_unlock_free;
1861 
1862 	if (vport->port_no == OVSP_LOCAL) {
1863 		err = -EINVAL;
1864 		goto exit_unlock_free;
1865 	}
1866 
1867 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1868 				      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1869 	BUG_ON(err < 0);
1870 	ovs_dp_detach_port(vport);
1871 	ovs_unlock();
1872 
1873 	ovs_notify(&dp_vport_genl_family, reply, info);
1874 	return 0;
1875 
1876 exit_unlock_free:
1877 	ovs_unlock();
1878 	kfree_skb(reply);
1879 	return err;
1880 }
1881 
1882 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1883 {
1884 	struct nlattr **a = info->attrs;
1885 	struct ovs_header *ovs_header = info->userhdr;
1886 	struct sk_buff *reply;
1887 	struct vport *vport;
1888 	int err;
1889 
1890 	reply = ovs_vport_cmd_alloc_info();
1891 	if (!reply)
1892 		return -ENOMEM;
1893 
1894 	rcu_read_lock();
1895 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1896 	err = PTR_ERR(vport);
1897 	if (IS_ERR(vport))
1898 		goto exit_unlock_free;
1899 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1900 				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1901 	BUG_ON(err < 0);
1902 	rcu_read_unlock();
1903 
1904 	return genlmsg_reply(reply, info);
1905 
1906 exit_unlock_free:
1907 	rcu_read_unlock();
1908 	kfree_skb(reply);
1909 	return err;
1910 }
1911 
1912 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1913 {
1914 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1915 	struct datapath *dp;
1916 	int bucket = cb->args[0], skip = cb->args[1];
1917 	int i, j = 0;
1918 
1919 	rcu_read_lock();
1920 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1921 	if (!dp) {
1922 		rcu_read_unlock();
1923 		return -ENODEV;
1924 	}
1925 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1926 		struct vport *vport;
1927 
1928 		j = 0;
1929 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1930 			if (j >= skip &&
1931 			    ovs_vport_cmd_fill_info(vport, skb,
1932 						    NETLINK_CB(cb->skb).portid,
1933 						    cb->nlh->nlmsg_seq,
1934 						    NLM_F_MULTI,
1935 						    OVS_VPORT_CMD_NEW) < 0)
1936 				goto out;
1937 
1938 			j++;
1939 		}
1940 		skip = 0;
1941 	}
1942 out:
1943 	rcu_read_unlock();
1944 
1945 	cb->args[0] = i;
1946 	cb->args[1] = j;
1947 
1948 	return skb->len;
1949 }
1950 
1951 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1952 	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1953 	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1954 	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1955 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1956 	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1957 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1958 };
1959 
1960 static const struct genl_ops dp_vport_genl_ops[] = {
1961 	{ .cmd = OVS_VPORT_CMD_NEW,
1962 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1963 	  .policy = vport_policy,
1964 	  .doit = ovs_vport_cmd_new
1965 	},
1966 	{ .cmd = OVS_VPORT_CMD_DEL,
1967 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1968 	  .policy = vport_policy,
1969 	  .doit = ovs_vport_cmd_del
1970 	},
1971 	{ .cmd = OVS_VPORT_CMD_GET,
1972 	  .flags = 0,		    /* OK for unprivileged users. */
1973 	  .policy = vport_policy,
1974 	  .doit = ovs_vport_cmd_get,
1975 	  .dumpit = ovs_vport_cmd_dump
1976 	},
1977 	{ .cmd = OVS_VPORT_CMD_SET,
1978 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1979 	  .policy = vport_policy,
1980 	  .doit = ovs_vport_cmd_set,
1981 	},
1982 };
1983 
1984 struct genl_family dp_vport_genl_family = {
1985 	.id = GENL_ID_GENERATE,
1986 	.hdrsize = sizeof(struct ovs_header),
1987 	.name = OVS_VPORT_FAMILY,
1988 	.version = OVS_VPORT_VERSION,
1989 	.maxattr = OVS_VPORT_ATTR_MAX,
1990 	.netnsok = true,
1991 	.parallel_ops = true,
1992 	.ops = dp_vport_genl_ops,
1993 	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
1994 	.mcgrps = &ovs_dp_vport_multicast_group,
1995 	.n_mcgrps = 1,
1996 };
1997 
1998 static struct genl_family * const dp_genl_families[] = {
1999 	&dp_datapath_genl_family,
2000 	&dp_vport_genl_family,
2001 	&dp_flow_genl_family,
2002 	&dp_packet_genl_family,
2003 };
2004 
2005 static void dp_unregister_genl(int n_families)
2006 {
2007 	int i;
2008 
2009 	for (i = 0; i < n_families; i++)
2010 		genl_unregister_family(dp_genl_families[i]);
2011 }
2012 
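/* Register all Generic Netlink families, unwinding the ones already
 * registered if a later registration fails.
 */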
2013 static int dp_register_genl(void)
2014 {
2015 	int err;
2016 	int i;
2017 
2018 	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2019 
2020 		err = genl_register_family(dp_genl_families[i]);
2021 		if (err)
2022 			goto error;
2023 	}
2024 
2025 	return 0;
2026 
2027 error:
2028 	dp_unregister_genl(i);
2029 	return err;
2030 }
2031 
2032 static int __net_init ovs_init_net(struct net *net)
2033 {
2034 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2035 
2036 	INIT_LIST_HEAD(&ovs_net->dps);
2037 	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2038 	return 0;
2039 }
2040 
2041 static void __net_exit ovs_exit_net(struct net *net)
2042 {
2043 	struct datapath *dp, *dp_next;
2044 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2045 
2046 	ovs_lock();
2047 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2048 		__dp_destroy(dp);
2049 	ovs_unlock();
2050 
2051 	cancel_work_sync(&ovs_net->dp_notify_work);
2052 }
2053 
2054 static struct pernet_operations ovs_net_ops = {
2055 	.init = ovs_init_net,
2056 	.exit = ovs_exit_net,
2057 	.id   = &ovs_net_id,
2058 	.size = sizeof(struct ovs_net),
2059 };
2060 
2061 static int __init dp_init(void)
2062 {
2063 	int err;
2064 
2065 	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2066 
2067 	pr_info("Open vSwitch switching datapath\n");
2068 
2069 	err = ovs_internal_dev_rtnl_link_register();
2070 	if (err)
2071 		goto error;
2072 
2073 	err = ovs_flow_init();
2074 	if (err)
2075 		goto error_unreg_rtnl_link;
2076 
2077 	err = ovs_vport_init();
2078 	if (err)
2079 		goto error_flow_exit;
2080 
2081 	err = register_pernet_device(&ovs_net_ops);
2082 	if (err)
2083 		goto error_vport_exit;
2084 
2085 	err = register_netdevice_notifier(&ovs_dp_device_notifier);
2086 	if (err)
2087 		goto error_netns_exit;
2088 
2089 	err = dp_register_genl();
2090 	if (err < 0)
2091 		goto error_unreg_notifier;
2092 
2093 	return 0;
2094 
2095 error_unreg_notifier:
2096 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2097 error_netns_exit:
2098 	unregister_pernet_device(&ovs_net_ops);
2099 error_vport_exit:
2100 	ovs_vport_exit();
2101 error_flow_exit:
2102 	ovs_flow_exit();
2103 error_unreg_rtnl_link:
2104 	ovs_internal_dev_rtnl_link_unregister();
2105 error:
2106 	return err;
2107 }
2108 
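/* Tear-down runs in the reverse order of dp_init().  The rcu_barrier()
 * waits for in-flight destroy_dp_rcu() callbacks to finish before the
 * vport and flow caches are destroyed.
 */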
2109 static void dp_cleanup(void)
2110 {
2111 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2112 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2113 	unregister_pernet_device(&ovs_net_ops);
2114 	rcu_barrier();
2115 	ovs_vport_exit();
2116 	ovs_flow_exit();
2117 	ovs_internal_dev_rtnl_link_unregister();
2118 }
2119 
2120 module_init(dp_init);
2121 module_exit(dp_cleanup);
2122 
2123 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2124 MODULE_LICENSE("GPL");
2125