// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_cls.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "openvswitch_trace.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath or port, set operations
 * on vports, etc.) and to other state (flow table modifications, setting
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

static void ovs_dp_masks_rebalance(struct work_struct *work);

static int ovs_dp_set_upcall_portids(struct datapath *, const struct nlattr *);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);

	return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	ovs_meters_exit(dp);
	kfree(rcu_dereference_raw(dp->upcall_portids));
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node,
				 lockdep_ovsl_is_held()) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

static void ovs_vport_update_upcall_stats(struct sk_buff *skb,
					  const struct dp_upcall_info *upcall_info,
					  bool upcall_result)
{
	struct vport *p = OVS_CB(skb)->input_vport;
	struct vport_upcall_stats_percpu *stats;

	if (upcall_info->cmd != OVS_PACKET_CMD_MISS &&
	    upcall_info->cmd != OVS_PACKET_CMD_ACTION)
		return;

	stats = this_cpu_ptr(p->upcall_stats);
	u64_stats_update_begin(&stats->syncp);
	if (upcall_result)
		u64_stats_inc(&stats->n_success);
	else
		u64_stats_inc(&stats->n_fail);
	u64_stats_update_end(&stats->syncp);
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Free percpu memory. */
	free_percpu(p->upcall_stats);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;
	u32 n_cache_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit, &n_cache_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;

		if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
			upcall.portid =
				ovs_dp_get_upcall_portid(dp, smp_processor_id());
		else
			upcall.portid = ovs_vport_find_upcall_portid(p, skb);

		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
		switch (error) {
		case 0:
		case -EAGAIN:
		case -ERESTARTSYS:
		case -EINTR:
			consume_skb(skb);
			break;
		default:
			kfree_skb(skb);
			break;
		}
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	error = ovs_execute_actions(dp, skb, sf_acts, key);
	if (unlikely(error))
		net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
				    ovs_dp_name(dp), error);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	stats->n_cache_hit += n_cache_hit;
	u64_stats_update_end(&stats->syncp);
}
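
/* Queue 'skb' to the userspace handler identified by upcall_info->portid.
 * GSO packets are segmented before queuing. On failure the per-CPU n_lost
 * counter is bumped; the caller retains ownership of 'skb' either way.
 */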
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{
	struct dp_stats_percpu *stats;
	int err;

	if (trace_ovs_dp_upcall_enabled())
		trace_ovs_dp_upcall(dp, skb, key, upcall_info);

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);

	ovs_vport_update_upcall_stats(skb, upcall_info, !err);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{
	unsigned int gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
		if (err)
			break;
	}

	/* Free all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return err;
}

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
		+ nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
		+ nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(actions_attrlen);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}

static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			skb_put_zero(skb, plen);
	}
}

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;
	u64 hash;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_csum_hwoffload_help(skb, 0)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
476 */ 477 if (dp->user_features & OVS_DP_F_UNALIGNED) 478 hlen = skb_zerocopy_headlen(skb); 479 else 480 hlen = skb->len; 481 482 len = upcall_msg_size(upcall_info, hlen - cutlen, 483 OVS_CB(skb)->acts_origlen); 484 user_skb = genlmsg_new(len, GFP_ATOMIC); 485 if (!user_skb) { 486 err = -ENOMEM; 487 goto out; 488 } 489 490 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 491 0, upcall_info->cmd); 492 if (!upcall) { 493 err = -EINVAL; 494 goto out; 495 } 496 upcall->dp_ifindex = dp_ifindex; 497 498 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb); 499 if (err) 500 goto out; 501 502 if (upcall_info->userdata) 503 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, 504 nla_len(upcall_info->userdata), 505 nla_data(upcall_info->userdata)); 506 507 if (upcall_info->egress_tun_info) { 508 nla = nla_nest_start_noflag(user_skb, 509 OVS_PACKET_ATTR_EGRESS_TUN_KEY); 510 if (!nla) { 511 err = -EMSGSIZE; 512 goto out; 513 } 514 err = ovs_nla_put_tunnel_info(user_skb, 515 upcall_info->egress_tun_info); 516 if (err) 517 goto out; 518 519 nla_nest_end(user_skb, nla); 520 } 521 522 if (upcall_info->actions_len) { 523 nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS); 524 if (!nla) { 525 err = -EMSGSIZE; 526 goto out; 527 } 528 err = ovs_nla_put_actions(upcall_info->actions, 529 upcall_info->actions_len, 530 user_skb); 531 if (!err) 532 nla_nest_end(user_skb, nla); 533 else 534 nla_nest_cancel(user_skb, nla); 535 } 536 537 /* Add OVS_PACKET_ATTR_MRU */ 538 if (upcall_info->mru && 539 nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) { 540 err = -ENOBUFS; 541 goto out; 542 } 543 544 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */ 545 if (cutlen > 0 && 546 nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) { 547 err = -ENOBUFS; 548 goto out; 549 } 550 551 /* Add OVS_PACKET_ATTR_HASH */ 552 hash = skb_get_hash_raw(skb); 553 if (skb->sw_hash) 554 hash |= OVS_PACKET_HASH_SW_BIT; 555 556 if (skb->l4_hash) 557 hash |= OVS_PACKET_HASH_L4_BIT; 558 559 if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof (u64), &hash)) { 560 err = -ENOBUFS; 561 goto out; 562 } 563 564 /* Only reserve room for attribute header, packet data is added 565 * in skb_zerocopy() */ 566 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { 567 err = -ENOBUFS; 568 goto out; 569 } 570 nla->nla_len = nla_attr_size(skb->len - cutlen); 571 572 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen); 573 if (err) 574 goto out; 575 576 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ 577 pad_packet(dp, user_skb); 578 579 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; 580 581 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); 582 user_skb = NULL; 583 out: 584 if (err) 585 skb_tx_error(skb); 586 consume_skb(user_skb); 587 consume_skb(nskb); 588 589 return err; 590 } 591 592 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) 593 { 594 struct ovs_header *ovs_header = info->userhdr; 595 struct net *net = sock_net(skb->sk); 596 struct nlattr **a = info->attrs; 597 struct sw_flow_actions *acts; 598 struct sk_buff *packet; 599 struct sw_flow *flow; 600 struct sw_flow_actions *sf_acts; 601 struct datapath *dp; 602 struct vport *input_vport; 603 u16 mru = 0; 604 u64 hash; 605 int len; 606 int err; 607 bool log = !a[OVS_PACKET_ATTR_PROBE]; 608 609 err = -EINVAL; 610 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || 611 !a[OVS_PACKET_ATTR_ACTIONS]) 612 goto err; 613 614 len = 
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct vport *input_vport;
	u16 mru = 0;
	u64 hash;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	if (a[OVS_PACKET_ATTR_HASH]) {
		hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);

		__skb_set_hash(packet, hash & 0xFFFFFFFFULL,
			       !!(hash & OVS_PACKET_HASH_SW_BIT),
			       !!(hash & OVS_PACKET_HASH_L4_BIT));
	}

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
	[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};

static const struct genl_small_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.policy = packet_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_packet_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
	.resv_start_op = OVS_PACKET_CMD_EXECUTE + 1,
	.module = THIS_MODULE,
};

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
		mega_stats->n_cache_hit += local_stats.n_cache_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback,
	 * see ovs_nla_put_identifier().
	 */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);
	else
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
			      OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
			  sizeof(struct ovs_flow_stats), &stats,
			  OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	if (WARN_ON_ONCE(retval < 0)) {
		kfree_skb(skb);
		skb = ERR_PTR(retval);
	}
	return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_key *key;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key) {
		error = -ENOMEM;
		goto err_kfree_key;
	}

	ovs_match_init(&match, key, false, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	ovs_flow_mask_key(&new_flow->key, key, true, &mask);

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       key, log);
	if (error)
		goto err_kfree_flow;

	/* Validate actions. */
	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
				     &new_flow->key, &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions_rcu(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);

	kfree(key);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
err_kfree_key:
	kfree(key);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
					 const struct nlattr *a,
					 const struct sw_flow_key *key,
					 const struct sw_flow_mask *mask,
					 bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

/* Factor out match-init and action-copy to avoid a
 * "Wframe-larger-than=1024" warning. Because the mask is only needed to
 * copy the actions, a separate function saves some stack space.
 *
 * If neither the key nor the actions attribute is present, we return 0
 * directly; in that case the caller will not use the match either. If the
 * actions attribute is present, we try to copy the actions and save them
 * in *acts. Before returning, we reset match->mask so that we never hand
 * back a match object with a dangling reference to the on-stack mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
			      struct sw_flow_match *match,
			      struct sw_flow_key *key,
			      struct nlattr **a,
			      struct sw_flow_actions **acts,
			      bool log)
{
	struct sw_flow_mask mask;
	int error = 0;

	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(match, key, true, &mask);
		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
					  a[OVS_FLOW_ATTR_MASK], log);
		if (error)
			goto error;
	}

	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		if (!a[OVS_FLOW_ATTR_KEY]) {
			OVS_NLERR(log,
				  "Flow key attribute not present in set flow.");
			error = -EINVAL;
			goto error;
		}

		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
					 &mask, log);
		if (IS_ERR(*acts)) {
			error = PTR_ERR(*acts);
			goto error;
		}
	}

	/* On success, error is 0. */
error:
	match->mask = NULL;
	return error;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
		OVS_NLERR(log,
			  "Flow set message rejected, Key attribute missing.");
		return -EINVAL;
	}

	error = ovs_nla_init_match_and_action(net, &match, &key, a,
					      &acts, log);
	if (error)
		goto error;

	if (acts) {
		/* Can allocate before locking if we have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_SET,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_SET, false,
						ufid_flags);

		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_GET, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *)flow->sf_acts,
					&flow->id, info, false, ufid_flags);
	if (likely(reply)) {
		if (!IS_ERR(reply)) {
			rcu_read_lock();	/* To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			if (WARN_ON_ONCE(err < 0)) {
				kfree_skb(reply);
				goto out_free;
			}

			ovs_notify(&dp_flow_genl_family, reply, info);
		} else {
			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
					PTR_ERR(reply));
		}
	}

out_free:
	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
				       OVS_FLOW_ATTR_MAX, flow_policy, NULL);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_GET, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static const struct genl_small_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0, /* OK for unprivileged users. */
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.policy = flow_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_flow_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.resv_start_op = OVS_FLOW_CMD_SET + 1,
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
	msgsize += nla_total_size(sizeof(u32) * nr_cpu_ids); /* OVS_DP_ATTR_PER_CPU_PIDS */

	return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	struct dp_nlsk_pids *pids = ovsl_dereference(dp->upcall_portids);
	int err, pids_len;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
			  &dp_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			  sizeof(struct ovs_dp_megaflow_stats),
			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
			ovs_flow_tbl_masks_cache_size(&dp->table)))
		goto nla_put_failure;

	if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU && pids) {
		pids_len = min(pids->n_pids, nr_cpu_ids) * sizeof(u32);
		if (nla_put(skb, OVS_DP_ATTR_PER_CPU_PIDS, pids_len, &pids->pids))
			goto nla_put_failure;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
			     info->attrs);
	if (IS_ERR(dp))
		return;

	pr_warn("%s: Dropping previously announced user features\n",
		ovs_dp_name(dp));
	dp->user_features = 0;
}

static int ovs_dp_set_upcall_portids(struct datapath *dp,
				     const struct nlattr *ids)
{
	struct dp_nlsk_pids *old, *dp_nlsk_pids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(dp->upcall_portids);

	dp_nlsk_pids = kmalloc(sizeof(*dp_nlsk_pids) + nla_len(ids),
			       GFP_KERNEL);
	if (!dp_nlsk_pids)
		return -ENOMEM;

	dp_nlsk_pids->n_pids = nla_len(ids) / sizeof(u32);
	nla_memcpy(dp_nlsk_pids->pids, ids, nla_len(ids));

	rcu_assign_pointer(dp->upcall_portids, dp_nlsk_pids);

	kfree_rcu(old, rcu);

	return 0;
}
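
/* Map the executing CPU to a userspace Netlink portid when per-CPU upcall
 * dispatch (OVS_DP_F_DISPATCH_UPCALL_PER_CPU) is enabled. Falls back to
 * pids[cpu_id % n_pids] when userspace registered fewer handler threads
 * than there are CPUs, and to portid 0 when none are registered.
 */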
u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id)
{
	struct dp_nlsk_pids *dp_nlsk_pids;

	dp_nlsk_pids = rcu_dereference(dp->upcall_portids);

	if (dp_nlsk_pids) {
		if (cpu_id < dp_nlsk_pids->n_pids) {
			return dp_nlsk_pids->pids[cpu_id];
		} else if (dp_nlsk_pids->n_pids > 0 &&
			   cpu_id >= dp_nlsk_pids->n_pids) {
			/* If the number of netlink PIDs is mismatched with
			 * the number of CPUs as seen by the kernel, log this
			 * and send the upcall to one of the registered
			 * sockets (cpu_id modulo n_pids) in order to not
			 * drop packets.
			 */
			pr_info_ratelimited("cpu_id mismatch with handler threads");
			return dp_nlsk_pids->pids[cpu_id %
						  dp_nlsk_pids->n_pids];
		} else {
			return 0;
		}
	} else {
		return 0;
	}
}

static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	u32 user_features = 0, old_features = dp->user_features;
	int err;

	if (a[OVS_DP_ATTR_USER_FEATURES]) {
		user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);

		if (user_features & ~(OVS_DP_F_VPORT_PIDS |
				      OVS_DP_F_UNALIGNED |
				      OVS_DP_F_TC_RECIRC_SHARING |
				      OVS_DP_F_DISPATCH_UPCALL_PER_CPU))
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
		if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
			return -EOPNOTSUPP;
#endif
	}

	if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
		int err;
		u32 cache_size;

		cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
		err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
		if (err)
			return err;
	}

	dp->user_features = user_features;

	if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU &&
	    a[OVS_DP_ATTR_PER_CPU_PIDS]) {
		/* Upcall Netlink Port IDs have been updated */
		err = ovs_dp_set_upcall_portids(dp,
						a[OVS_DP_ATTR_PER_CPU_PIDS]);
		if (err)
			return err;
	}

	if ((dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
	    !(old_features & OVS_DP_F_TC_RECIRC_SHARING))
		tc_skb_ext_tc_enable();
	else if (!(dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) &&
		 (old_features & OVS_DP_F_TC_RECIRC_SHARING))
		tc_skb_ext_tc_disable();

	return 0;
}

static int ovs_dp_stats_init(struct datapath *dp)
{
	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		return -ENOMEM;

	return 0;
}

static int ovs_dp_vport_init(struct datapath *dp)
{
	int i;

	dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
				  sizeof(struct hlist_head),
				  GFP_KERNEL);
	if (!dp->ports)
		return -ENOMEM;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	return 0;
}
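
/* OVS_DP_CMD_NEW: allocate a datapath with its flow table, per-CPU stats,
 * vport hash table and meters, then create the OVSP_LOCAL internal port
 * under ovs_mutex. Failures unwind in reverse order through the labels at
 * the bottom.
 */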
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_destroy_reply;

	ovs_dp_set_net(dp, sock_net(skb->sk));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_destroy_dp;

	err = ovs_dp_stats_init(dp);
	if (err)
		goto err_destroy_table;

	err = ovs_dp_vport_init(dp);
	if (err)
		goto err_destroy_stats;

	err = ovs_meters_init(dp);
	if (err)
		goto err_destroy_ports;

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
	parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
				? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;

	/* So far only local changes have been made, now need the lock. */
	ovs_lock();

	err = ovs_dp_change(dp, a);
	if (err)
		goto err_unlock_and_destroy_meters;

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		if (err == -EEXIST) {
			/* An outdated user space instance that does not
			 * understand the concept of user_features has
			 * attempted to create a new datapath and is likely
			 * to reuse it. Drop all user features.
			 */
			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
				ovs_dp_reset_user_features(skb, info);
		}

		goto err_destroy_portids;
	}

	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto err_destroy_vport;
	}

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_NEW);
	BUG_ON(err < 0);

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);
	return 0;

err_destroy_vport:
	ovs_dp_detach_port(vport);
err_destroy_portids:
	kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
	ovs_unlock();
	ovs_meters_exit(dp);
err_destroy_ports:
	kfree(dp->ports);
err_destroy_stats:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_destroy_dp:
	kfree(dp);
err_destroy_reply:
	kfree_skb(reply);
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	struct flow_table *table = &dp->table;
	int i;

	if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
		tc_skb_ext_tc_disable();

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port. Make sure all other
	 * ports in the datapath are destroyed before freeing the datapath
	 * itself.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	/* Flush the sw_flows from the tables here; the RCU callback only
	 * releases resources such as the dp, ports and tables. This avoids
	 * issues such as RCU usage warnings.
	 */
	table_instance_flow_flush(table, ovsl_dereference(table->ti),
				  ovsl_dereference(table->ufid_ti));

	/* RCU destroy the ports, meters and flow tables. */
	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_DEL);
	BUG_ON(err < 0);

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	err = ovs_dp_change(dp, info->attrs);
	if (err)
		goto err_unlock_free;

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_SET);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto err_unlock_free;
	}
	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_GET);
	BUG_ON(err < 0);
	ovs_unlock();

	return genlmsg_reply(reply, info);

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}
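
/* Datapath dump: cb->args[0] remembers how many datapaths have already
 * been emitted so that a multi-part dump can resume where it left off.
 */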
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	ovs_lock();
	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_GET) < 0)
			break;
		i++;
	}
	ovs_unlock();

	cb->args[0] = i;

	return skb->len;
}

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
	[OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
	[OVS_DP_ATTR_IFINDEX] = { .type = NLA_U32 },
};

static const struct genl_small_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0, /* OK for unprivileged users. */
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_set,
	},
};

static struct genl_family dp_datapath_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.policy = datapath_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_datapath_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
	.resv_start_op = OVS_DP_CMD_SET + 1,
	.mcgrps = &ovs_dp_datapath_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

/* Called with ovs_mutex or RCU read lock.
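 * Either lock keeps the vport and its datapath alive while the message is
 * being filled.  The gfp argument must match the calling context:
 * GFP_KERNEL under ovs_mutex, GFP_ATOMIC inside an RCU read-side section.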
 */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   struct net *net, u32 portid, u32 seq,
				   u32 flags, u8 cmd, gfp_t gfp)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
			   ovs_vport_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
		goto nla_put_failure;

	if (!net_eq(net, dev_net(vport->dev))) {
		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);

		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
			goto nla_put_failure;
	}

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
			  sizeof(struct ovs_vport_stats), &vport_stats,
			  OVS_VPORT_ATTR_PAD))
		goto nla_put_failure;

	if (ovs_vport_get_upcall_stats(vport, skb))
		goto nla_put_failure;

	if (ovs_vport_get_upcall_portids(vport, skb))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
					 GFP_KERNEL);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock.
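 * The returned vport is only guaranteed to stay valid for as long as the
 * caller holds ovs_mutex or remains inside the RCU read-side section.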
 */
static struct vport *lookup_vport(struct net *net,
				  const struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_IFINDEX])
		return ERR_PTR(-EOPNOTSUPP);
	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

static unsigned int ovs_get_max_headroom(struct datapath *dp)
{
	unsigned int dev_headroom, max_headroom = 0;
	struct net_device *dev;
	struct vport *vport;
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
					 lockdep_ovsl_is_held()) {
			dev = vport->dev;
			dev_headroom = netdev_get_fwd_headroom(dev);
			if (dev_headroom > max_headroom)
				max_headroom = dev_headroom;
		}
	}

	return max_headroom;
}

/* Called with ovs_mutex. */
static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
{
	struct vport *vport;
	int i;

	dp->max_headroom = new_headroom;
	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
					 lockdep_ovsl_is_held())
			netdev_set_rx_headroom(vport->dev, new_headroom);
	}
}

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	unsigned int new_headroom;
	u32 port_no;
	int err;

	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		return -EINVAL;

	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);

	if (a[OVS_VPORT_ATTR_IFINDEX] && parms.type != OVS_VPORT_TYPE_INTERNAL)
		return -EOPNOTSUPP;

	port_no = a[OVS_VPORT_ATTR_PORT_NO] ?
		  nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
	if (port_no >= DP_MAX_PORTS)
		return -EFBIG;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
restart:
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_free;

	if (port_no) {
		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_free;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_free;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
	parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX] ?
		nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport)) {
		if (err == -EAGAIN)
			goto restart;
		goto exit_unlock_free;
	}

	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto exit_unlock_free_vport;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_NEW, GFP_KERNEL);

	new_headroom = netdev_get_fwd_headroom(vport->dev);

	if (new_headroom > dp->max_headroom)
		ovs_update_headroom(dp, new_headroom);
	else
		netdev_set_rx_headroom(vport->dev, dp->max_headroom);

	BUG_ON(err < 0);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free_vport:
	ovs_dp_detach_port(vport);
exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

		err = ovs_vport_set_upcall_portids(vport, ids);
		if (err)
			goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_SET, GFP_KERNEL);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	bool update_headroom = false;
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
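	/* dp->max_headroom caches the largest rx headroom needed by any
	 * vport in the datapath.  If the port being deleted is the one that
	 * set that maximum, the cached value must be recomputed below once
	 * the port is gone.
	 */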
	struct datapath *dp;
	struct vport *vport;
	unsigned int new_headroom;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
	BUG_ON(err < 0);

	/* The vport deletion may trigger a dp headroom update. */
	dp = vport->dp;
	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
		update_headroom = true;

	netdev_reset_rx_headroom(vport->dev);
	ovs_dp_detach_port(vport);

	if (update_headroom) {
		new_headroom = ovs_get_max_headroom(dp);

		if (new_headroom < dp->max_headroom)
			ovs_update_headroom(dp, new_headroom);
	}
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;
	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
	BUG_ON(err < 0);
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock_free:
	rcu_read_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    sock_net(skb->sk),
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_GET,
						    GFP_ATOMIC) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static void ovs_dp_masks_rebalance(struct work_struct *work)
{
	struct ovs_net *ovs_net = container_of(work, struct ovs_net,
					       masks_rebalance.work);
	struct datapath *dp;

	ovs_lock();

	list_for_each_entry(dp, &ovs_net->dps, list_node)
		ovs_flow_masks_rebalance(&dp->table);

	ovs_unlock();

	schedule_delayed_work(&ovs_net->masks_rebalance,
			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
}

static const
struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
	[OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
};

static const struct genl_small_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0, /* OK for unprivileged users. */
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family dp_vport_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.policy = vport_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_vport_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
	.resv_start_op = OVS_VPORT_CMD_SET + 1,
	.mcgrps = &ovs_dp_vport_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static struct genl_family * const dp_genl_families[] = {
	&dp_datapath_genl_family,
	&dp_vport_genl_family,
	&dp_flow_genl_family,
	&dp_packet_genl_family,
	&dp_meter_genl_family,
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	&dp_ct_limit_genl_family,
#endif
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i]);
}

static int __init dp_register_genl(void)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		err = genl_register_family(dp_genl_families[i]);
		if (err)
			goto error;
	}

	return 0;

error:
	dp_unregister_genl(i);
	return err;
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	int err;

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);

	err = ovs_ct_init(net);
	if (err)
		return err;

	schedule_delayed_work(&ovs_net->masks_rebalance,
			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
	return 0;
}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{
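	/* Collect, from the datapaths of 'net', the internal-device vports
	 * whose underlying netdev lives in the dying namespace 'dnet', so
	 * that ovs_exit_net() can detach them.
	 */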
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;

			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
					continue;

				if (dev_net(vport->dev) == dnet)
					list_add(&vport->detach_list, head);
			}
		}
	}
}

static void __net_exit ovs_exit_net(struct net *dnet)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
	struct vport *vport, *vport_next;
	struct net *net;
	LIST_HEAD(head);

	ovs_lock();

	ovs_ct_exit(dnet);

	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	down_read(&net_rwsem);
	for_each_net(net)
		list_vports_from_net(net, dnet, &head);
	up_read(&net_rwsem);

	/* Detach all vports from the given namespace. */
	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
		list_del(&vport->detach_list);
		ovs_dp_detach_port(vport);
	}

	ovs_unlock();

	cancel_delayed_work_sync(&ovs_net->masks_rebalance);
	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = action_fifos_init();
	if (err)
		goto error;

	err = ovs_internal_dev_rtnl_link_register();
	if (err)
		goto error_action_fifos_exit;

	err = ovs_flow_init();
	if (err)
		goto error_unreg_rtnl_link;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = ovs_netdev_init();
	if (err)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_netdev;

	return 0;

error_unreg_netdev:
	ovs_netdev_exit();
error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_unreg_rtnl_link:
	ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
	action_fifos_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	ovs_netdev_exit();
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_internal_dev_rtnl_link_unregister();
	action_fifos_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
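/* Together with the alias above, the entries below let the generic netlink
 * core load this module on demand when userspace first requests one of the
 * OVS families.
 */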
MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);
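
/* Example (not part of the kernel build): a minimal userspace sketch of how
 * a process might dump this module's datapaths over generic netlink.  It
 * assumes libnl-3/libnl-genl-3; the variable names are illustrative and
 * error handling is omitted for brevity.
 *
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/openvswitch.h>
 *
 *	int main(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		struct nl_msg *msg = nlmsg_alloc();
 *		int family;
 *
 *		genl_connect(sk);
 *		// Resolve "ovs_datapath" to its dynamically assigned id.
 *		family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
 *
 *		// NLM_F_DUMP reaches ovs_dp_cmd_dump(); the hdrlen argument
 *		// reserves the (zeroed) struct ovs_header the family expects.
 *		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *			    sizeof(struct ovs_header), NLM_F_DUMP,
 *			    OVS_DP_CMD_GET, OVS_DATAPATH_VERSION);
 *		nl_send_auto(sk, msg);
 *		nl_recvmsgs_default(sk);
 *
 *		nlmsg_free(msg);
 *		nl_socket_free(sk);
 *		return 0;
 *	}
 */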