// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"

struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,	 /* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};

/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;		/* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;		/* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;	/* Only present for SRC NAT and DST NAT. */
#endif
};

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED	0
#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS	512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};

struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif

static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}

/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}

static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? ct->mark : 0;
#else
	return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif

static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}

static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}

static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}

/* Update 'key' based on skb->_nfct. If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action. If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they are
 * initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the local
 * stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
{
	ovs_ct_update_key(skb, NULL, key, false, false);
}

int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig;

			memset(&orig, 0, sizeof(orig));
			orig.ipv4_src = output->ipv4.ct_orig.src;
			orig.ipv4_dst = output->ipv4.ct_orig.dst;
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv4_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig;

			memset(&orig, 0, sizeof(orig));
			memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
			       sizeof(orig.ipv6_src));
			memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
			       sizeof(orig.ipv6_dst));
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv6_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}

static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}

static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}

/* Initialize labels for a new, yet to be committed conntrack entry. Note that
 * since the new connection is not yet confirmed, and thus no-one else has
 * access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;	/* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				(labels->ct_labels_32[i]
				 & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

/* 'skb' should already be pulled to nh_ofs. */
static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
{
	const struct nf_conntrack_helper *helper;
	const struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;
	unsigned int protoff;
	struct nf_conn *ct;
	int err;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help)
		return NF_ACCEPT;

	helper = rcu_dereference(help->helper);
	if (!helper)
		return NF_ACCEPT;

	switch (proto) {
	case NFPROTO_IPV4:
		protoff = ip_hdrlen(skb);
		break;
	case NFPROTO_IPV6: {
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
		__be16 frag_off;
		int ofs;

		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				       &frag_off);
		if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
			pr_debug("proto header not found\n");
			return NF_ACCEPT;
		}
		protoff = ofs;
		break;
	}
	default:
		WARN_ONCE(1, "helper invoked on non-IP family!");
		return NF_DROP;
	}

	err = helper->help(skb, protoff, ct, ctinfo);
	if (err != NF_ACCEPT)
		return err;

	/* Adjust seqs after helper. This is needed due to some helpers (e.g.,
	 * FTP with NAT) adjusting the TCP payload size when mangling IP
	 * addresses and/or port numbers in the text-based control connection.
	 */
485 */ 486 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 487 !nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) 488 return NF_DROP; 489 return NF_ACCEPT; 490 } 491 492 /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero 493 * value if 'skb' is freed. 494 */ 495 static int handle_fragments(struct net *net, struct sw_flow_key *key, 496 u16 zone, struct sk_buff *skb) 497 { 498 struct ovs_skb_cb ovs_cb = *OVS_CB(skb); 499 int err; 500 501 if (key->eth.type == htons(ETH_P_IP)) { 502 enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone; 503 504 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 505 err = ip_defrag(net, skb, user); 506 if (err) 507 return err; 508 509 ovs_cb.mru = IPCB(skb)->frag_max_size; 510 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 511 } else if (key->eth.type == htons(ETH_P_IPV6)) { 512 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 513 514 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 515 err = nf_ct_frag6_gather(net, skb, user); 516 if (err) { 517 if (err != -EINPROGRESS) 518 kfree_skb(skb); 519 return err; 520 } 521 522 key->ip.proto = ipv6_hdr(skb)->nexthdr; 523 ovs_cb.mru = IP6CB(skb)->frag_max_size; 524 #endif 525 } else { 526 kfree_skb(skb); 527 return -EPFNOSUPPORT; 528 } 529 530 /* The key extracted from the fragment that completed this datagram 531 * likely didn't have an L4 header, so regenerate it. 532 */ 533 ovs_flow_key_update_l3l4(skb, key); 534 535 key->ip.frag = OVS_FRAG_TYPE_NONE; 536 skb_clear_hash(skb); 537 skb->ignore_df = 1; 538 *OVS_CB(skb) = ovs_cb; 539 540 return 0; 541 } 542 543 static struct nf_conntrack_expect * 544 ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone, 545 u16 proto, const struct sk_buff *skb) 546 { 547 struct nf_conntrack_tuple tuple; 548 struct nf_conntrack_expect *exp; 549 550 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple)) 551 return NULL; 552 553 exp = __nf_ct_expect_find(net, zone, &tuple); 554 if (exp) { 555 struct nf_conntrack_tuple_hash *h; 556 557 /* Delete existing conntrack entry, if it clashes with the 558 * expectation. This can happen since conntrack ALGs do not 559 * check for clashes between (new) expectations and existing 560 * conntrack entries. nf_conntrack_in() will check the 561 * expectations only if a conntrack entry can not be found, 562 * which can lead to OVS finding the expectation (here) in the 563 * init direction, but which will not be removed by the 564 * nf_conntrack_in() call, if a matching conntrack entry is 565 * found instead. In this case all init direction packets 566 * would be reported as new related packets, while reply 567 * direction packets would be reported as un-related 568 * established packets. 569 */ 570 h = nf_conntrack_find_get(net, zone, &tuple); 571 if (h) { 572 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 573 574 nf_ct_delete(ct, 0, 0); 575 nf_conntrack_put(&ct->ct_general); 576 } 577 } 578 579 return exp; 580 } 581 582 /* This replicates logic from nf_conntrack_core.c that is not exported. */ 583 static enum ip_conntrack_info 584 ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h) 585 { 586 const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 587 588 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) 589 return IP_CT_ESTABLISHED_REPLY; 590 /* Once we've had two way comms, always ESTABLISHED. 
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}

/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state. This allows an
 * skb->_nfct lost due to an upcall to be recovered during actions execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection. Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;	/* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}

static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb. This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced. If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      const struct nf_nat_range2 *range,
			      enum nf_nat_manip_type maniptype)
{
	int hooknum, nh_off, err = NF_ACCEPT;

	nh_off = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_off);

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto push;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto push;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto push;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto push;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
	skb_push(skb, nh_off);
	skb_postpush_rcsum(skb, skb->data, nh_off);

	return err;
}

static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}

/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	enum nf_nat_manip_type maniptype;
	int err;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_ACCEPT;	/* Can't NAT. */

	/* Determine NAT type.
	 * Check if the NAT type can be deduced from the tracked connection.
	 * Make sure new expected connections (IP_CT_RELATED) are NATted only
	 * when committing.
	 */
	if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
	    ct->status & IPS_NAT_MASK &&
	    (ctinfo != IP_CT_RELATED || info->commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (info->nat & OVS_CT_SRC_NAT) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (info->nat & OVS_CT_DST_NAT) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT; /* Connection is not NATed. */
	}
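	/* First translation pass; when the connection also needs the other
	 * manip type (e.g. both SNAT and DNAT are set up), a second
	 * ovs_ct_nat_execute() pass is done below.
	 */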
	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);

	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
						 maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
						 NF_NAT_MANIP_SRC);
		}
	}

	/* Mark NAT done if successful and update the flow key. */
	if (err == NF_ACCEPT)
		ovs_nat_update_key(key, skb, maniptype);

	return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif

/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already. Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return -ENOENT;

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call. We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		bool add_helper = false;

		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT. We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action. For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit) &&
		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
			return -EINVAL;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one,
		 * or attach a helper in a later commit. Therefore, for
		 * connections which we will commit, we may need to attach
		 * the helper here.
		 */
		if (info->commit && info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;
			add_helper = true;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") or a
		 *   helper was just attached ("add_helper") for a confirmed
		 *   connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
					      info->commit) &&
		    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
			return -EINVAL;
		}

		if (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
			/* Be liberal for tcp packets so that out-of-window
			 * packets are not marked invalid.
			 */
			nf_ct_set_tcp_be_liberal(ct);
		}
	}

	return 0;
}

/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conntrack_expect *exp;

	/* If we pass an expected packet through nf_conntrack_in() the
	 * expectation is typically removed, but the packet could still be
	 * lost in upcall processing. To prevent this from happening we
	 * perform an explicit expectation lookup. Expected connections are
	 * always new, and will be passed through conntrack only when they are
	 * committed, as it is OK to remove the expectation at that time.
	 */
	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
	if (exp) {
		u8 state;

		/* NOTE: New connections are NATted and Helped only when
		 * committed, so we are not calling into NAT here.
		 */
		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
		__ovs_ct_update_key(key, state, &info->zone, exp->master);
	} else {
		struct nf_conn *ct;
		int err;

		err = __ovs_ct_lookup(net, key, info, skb);
		if (err)
			return err;

		ct = (struct nf_conn *)skb_nfct(skb);
		if (ct)
			nf_ct_deliver_cached_events(ct);
	}

	return 0;
}

static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}

/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}

/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}

static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
#endif

/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given. NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates. Setting the event
	 * mask allows those events to be filtered. The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
		return -EINVAL;

	return 0;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
	unsigned int len;
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case htons(ETH_P_IPV6):
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);
	if (err)
		kfree_skb(skb);

	return err;
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = ovs_skb_network_trim(skb);
	if (err)
		return err;

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = handle_fragments(net, key, info->zone.id, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	skb_push(skb, nh_ofs);
	skb_postpush_rcsum(skb, skb->data, nh_ofs);
	if (err)
		kfree_skb(skb);
	return err;
}

int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	if (skb_nfct(skb)) {
		nf_conntrack_put(skb_nfct(skb));
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		ovs_ct_fill_key(skb, key);
	}

	return 0;
}

static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
			     const struct sw_flow_key *key, bool log)
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help;
	int ret = 0;

	helper = nf_conntrack_helper_try_module_get(name, info->family,
						    key->ip.proto);
	if (!helper) {
		OVS_NLERR(log, "Unknown helper \"%s\"", name);
		return -EINVAL;
	}

	help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
	if (!help) {
		nf_conntrack_helper_put(helper);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (info->nat) {
		ret = nf_nat_helper_try_module_get(name, info->family,
						   key->ip.proto);
		if (ret) {
			nf_conntrack_helper_put(helper);
			OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
				  name, ret);
			return ret;
		}
	}
#endif
	rcu_assign_pointer(help->helper, helper);
	info->helper = helper;
	return ret;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
		     struct ovs_conntrack_info *info, bool log)
{
	struct nlattr *a;
	int rem;
	bool have_ip_max = false;
	bool have_proto_max = false;
	bool ip_vers = (info->family == NFPROTO_IPV6);

	nla_for_each_nested(a, attr, rem) {
		static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
			[OVS_NAT_ATTR_SRC] = {0, 0},
			[OVS_NAT_ATTR_DST] = {0, 0},
			[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
			[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
			[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
		};
		int type = nla_type(a);

		if (type > OVS_NAT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
				  type, OVS_NAT_ATTR_MAX);
			return -EINVAL;
		}

		if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
			OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
				  type, nla_len(a),
				  ovs_nat_attr_lens[type][ip_vers]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NAT_ATTR_SRC:
		case OVS_NAT_ATTR_DST:
			if (info->nat) {
				OVS_NLERR(log, "Only one type of NAT may be specified");
				return -ERANGE;
			}
			info->nat |= OVS_CT_NAT;
			info->nat |= ((type == OVS_NAT_ATTR_SRC)
					? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
			break;

		case OVS_NAT_ATTR_IP_MIN:
			nla_memcpy(&info->range.min_addr, a,
				   sizeof(info->range.min_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_IP_MAX:
			have_ip_max = true;
			nla_memcpy(&info->range.max_addr, a,
				   sizeof(info->range.max_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_PROTO_MIN:
			info->range.min_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PROTO_MAX:
			have_proto_max = true;
			info->range.max_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PERSISTENT:
			info->range.flags |= NF_NAT_RANGE_PERSISTENT;
			break;

		case OVS_NAT_ATTR_PROTO_HASH:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
			break;

		case OVS_NAT_ATTR_PROTO_RANDOM:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
			break;

		default:
			OVS_NLERR(log, "Unknown nat attribute (%d)", type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
		return -EINVAL;
	}
	if (!info->nat) {
		/* Do not allow flags if no type is given. */
		if (info->range.flags) {
			OVS_NLERR(log,
				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
				  );
			return -EINVAL;
		}
		info->nat = OVS_CT_NAT;	/* NAT existing connections. */
	} else if (!info->commit) {
		OVS_NLERR(log,
			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
			  );
		return -EINVAL;
	}
	/* Allow missing IP_MAX. */
	if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
		memcpy(&info->range.max_addr, &info->range.min_addr,
		       sizeof(info->range.max_addr));
	}
	/* Allow missing PROTO_MAX. */
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    !have_proto_max) {
		info->range.max_proto.all = info->range.min_proto.all;
	}
	return 0;
}
#endif

static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
	[OVS_CT_ATTR_COMMIT]		= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_FORCE_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_ZONE]		= { .minlen = sizeof(u16),
					    .maxlen = sizeof(u16) },
	[OVS_CT_ATTR_MARK]		= { .minlen = sizeof(struct md_mark),
					    .maxlen = sizeof(struct md_mark) },
	[OVS_CT_ATTR_LABELS]		= { .minlen = sizeof(struct md_labels),
					    .maxlen = sizeof(struct md_labels) },
	[OVS_CT_ATTR_HELPER]		= { .minlen = 1,
					    .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
	/* NAT length is checked when parsing the nested attributes. */
	[OVS_CT_ATTR_NAT]		= { .minlen = 0, .maxlen = INT_MAX },
#endif
	[OVS_CT_ATTR_EVENTMASK]		= { .minlen = sizeof(u32),
					    .maxlen = sizeof(u32) },
	[OVS_CT_ATTR_TIMEOUT]		= { .minlen = 1,
					    .maxlen = CTNL_TIMEOUT_NAME_MAX },
};

static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen;
		int minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log,
				  "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}

		maxlen = ovs_ct_attr_lens[type].maxlen;
		minlen = ovs_ct_attr_lens[type].minlen;
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log,
				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_FORCE_COMMIT:
			info->force = true;
			fallthrough;
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!memchr(*helper, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
#if IS_ENABLED(CONFIG_NF_NAT)
		case OVS_CT_ATTR_NAT: {
			int err = parse_nat(a, info, log);

			if (err)
				return err;
			break;
		}
#endif
		case OVS_CT_ATTR_EVENTMASK:
			info->have_eventmask = true;
			info->eventmask = nla_get_u32(a);
			break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		case OVS_CT_ATTR_TIMEOUT:
			memcpy(info->timeout, nla_data(a), nla_len(a));
			if (!memchr(info->timeout, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack timeout");
				return -EINVAL;
			}
			break;
#endif

		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)",
				  type);
			return -EINVAL;
		}
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (!info->commit && info->mark.mask) {
		OVS_NLERR(log,
			  "Setting conntrack mark requires 'commit' flag.");
		return -EINVAL;
	}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	if (!info->commit && labels_nonzero(&info->labels.mask)) {
		OVS_NLERR(log,
			  "Setting conntrack labels requires 'commit' flag.");
		return -EINVAL;
	}
#endif
	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}

bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
	if (attr == OVS_KEY_ATTR_CT_STATE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    attr == OVS_KEY_ATTR_CT_ZONE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    attr == OVS_KEY_ATTR_CT_MARK)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    attr == OVS_KEY_ATTR_CT_LABELS) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		return ovs_net->xt_label;
	}

	return false;
}

int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
		       const struct sw_flow_key *key,
		       struct sw_flow_actions **sfa, bool log)
{
	struct ovs_conntrack_info ct_info;
	const char *helper = NULL;
	u16 family;
	int err;

	family = key_to_nfproto(key);
	if (family == NFPROTO_UNSPEC) {
		OVS_NLERR(log, "ct family unspecified");
		return -EINVAL;
	}

	memset(&ct_info, 0, sizeof(ct_info));
	ct_info.family = family;

	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);

	err = parse_ct(attr, &ct_info, &helper, log);
	if (err)
		return err;

	/* Set up template for tracking connections in specific zones. */
	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
	if (!ct_info.ct) {
		OVS_NLERR(log, "Failed to allocate conntrack template");
		return -ENOMEM;
	}

	if (ct_info.timeout[0]) {
		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
				      ct_info.timeout))
			pr_info_ratelimited("Failed to associate timeout "
					    "policy `%s'\n", ct_info.timeout);
		else
			ct_info.nf_ct_timeout = rcu_dereference(
				nf_ct_timeout_find(ct_info.ct)->timeout);

	}

	if (helper) {
		err = ovs_ct_add_helper(&ct_info, helper, key, log);
		if (err)
			goto err_free_ct;
	}

	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
				 sizeof(ct_info), log);
	if (err)
		goto err_free_ct;

	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
	nf_conntrack_get(&ct_info.ct->ct_general);
	return 0;
err_free_ct:
	__ovs_ct_free_action(&ct_info);
	return err;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
			       struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
	if (!start)
		return false;

	if (info->nat & OVS_CT_SRC_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
			return false;
	} else if (info->nat & OVS_CT_DST_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
			return false;
	} else {
		goto out;
	}

	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    info->family == NFPROTO_IPV4) {
			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
					    info->range.min_addr.ip) ||
			    (info->range.max_addr.ip
			     != info->range.min_addr.ip &&
			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
					      info->range.max_addr.ip))))
				return false;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   info->family == NFPROTO_IPV6) {
			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
					     &info->range.min_addr.in6) ||
			    (memcmp(&info->range.max_addr.in6,
				    &info->range.min_addr.in6,
				    sizeof(info->range.max_addr.in6)) &&
			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
					       &info->range.max_addr.in6))))
				return false;
		} else {
			return false;
		}
	}
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
			 ntohs(info->range.min_proto.all)) ||
	     (info->range.max_proto.all != info->range.min_proto.all &&
	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
			  ntohs(info->range.max_proto.all)))))
		return false;

	if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
		return false;
out:
	nla_nest_end(skb, start);

	return true;
}
#endif

int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
			  struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
	if (!start)
		return -EMSGSIZE;

	if (ct_info->commit && nla_put_flag(skb, ct_info->force
					    ? OVS_CT_ATTR_FORCE_COMMIT
					    : OVS_CT_ATTR_COMMIT))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
		    &ct_info->mark))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    labels_nonzero(&ct_info->labels.mask) &&
	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
		    &ct_info->labels))
		return -EMSGSIZE;
	if (ct_info->helper) {
		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
				   ct_info->helper->name))
			return -EMSGSIZE;
	}
	if (ct_info->have_eventmask &&
	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
		return -EMSGSIZE;
	if (ct_info->timeout[0]) {
		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
			return -EMSGSIZE;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
		return -EMSGSIZE;
#endif
	nla_nest_end(skb, start);

	return 0;
}

void ovs_ct_free_action(const struct nlattr *a)
{
	struct ovs_conntrack_info *ct_info = nla_data(a);

	__ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
	if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (ct_info->nat)
			nf_nat_helper_put(ct_info->helper);
#endif
		nf_conntrack_helper_put(ct_info->helper);
	}
	if (ct_info->ct) {
		if (ct_info->timeout[0])
			nf_ct_destroy_timeout(ct_info->ct);
		nf_ct_tmpl_free(ct_info->ct);
	}
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
	int i, err;

	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
					 GFP_KERNEL);
	if (!ovs_net->ct_limit_info)
		return -ENOMEM;

	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
	ovs_net->ct_limit_info->limits =
		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
			      GFP_KERNEL);
	if (!ovs_net->ct_limit_info->limits) {
		kfree(ovs_net->ct_limit_info);
		return -ENOMEM;
	}

	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

	ovs_net->ct_limit_info->data =
		nf_conncount_init(net, NFPROTO_INET, sizeof(u32));

	if (IS_ERR(ovs_net->ct_limit_info->data)) {
		err = PTR_ERR(ovs_net->ct_limit_info->data);
		kfree(ovs_net->ct_limit_info->limits);
		kfree(ovs_net->ct_limit_info);
		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
		return err;
	}
	return 0;
}

static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
	int i;

	nf_conncount_destroy(net, NFPROTO_INET, info->data);
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		struct hlist_head *head = &info->limits[i];
		struct ovs_ct_limit *ct_limit;

		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
					 lockdep_ovsl_is_held())
			kfree_rcu(ct_limit, rcu);
	}
	kfree(info->limits);
	kfree(info);
}

static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
			     struct ovs_header **ovs_reply_header)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *skb;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
					info->snd_seq,
					&dp_ct_limit_genl_family, 0, cmd);

	if (!*ovs_reply_header) {
		nlmsg_free(skb);
		return ERR_PTR(-EMSGSIZE);
	}
	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

	return skb;
}

static bool check_zone_id(int zone_id, u16 *pzone)
{
	if (zone_id >= 0 && zone_id <= 65535) {
		*pzone = (u16)zone_id;
		return true;
	}
	return false;
}

static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			ovs_lock();
			ct_limit_del(info, zone);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
					  struct sk_buff *reply)
{
	struct ovs_zone_limit zone_limit;

	zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
	zone_limit.limit = info->default_limit;

	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int __ovs_ct_limit_get_zone_limit(struct net *net,
					 struct nf_conncount_data *data,
					 u16 zone_id, u32 limit,
					 struct sk_buff *reply)
{
	struct nf_conntrack_zone ct_zone;
	struct ovs_zone_limit zone_limit;
	u32 conncount_key = zone_id;

	zone_limit.zone_id = zone_id;
	zone_limit.limit = limit;
	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
					      &ct_zone);
	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int ovs_ct_limit_get_zone_limit(struct net *net,
					struct nlattr *nla_zone_limit,
					struct ovs_ct_limit_info *info,
					struct sk_buff *reply)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err;
	u32 limit;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			err = ovs_ct_limit_get_default_limit(info, reply);
			if (err)
				return err;
		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
						   &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			rcu_read_lock();
			limit = ct_limit_get(info, zone);
			rcu_read_unlock();

			err = __ovs_ct_limit_get_zone_limit(
				net, info->data, zone, limit, reply);
			if (err)
				return err;
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_get_all_zone_limit(struct net *net,
					   struct ovs_ct_limit_info *info,
					   struct sk_buff *reply)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	int i, err = 0;

	err = ovs_ct_limit_get_default_limit(info, reply);
	if (err)
		return err;

	rcu_read_lock();
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		head = &info->limits[i];
		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
			err = __ovs_ct_limit_get_zone_limit(net, info->data,
				ct_limit->zone, ct_limit->limit, reply);
			if (err)
				goto exit_err;
		}
	}

exit_err:
	rcu_read_unlock();
	return err;
}

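/* Generic netlink doit handler for OVS_CT_LIMIT_CMD_SET.  Applies the
 * requested zone limits and enables the ovs_ct_limit_enabled static key,
 * which gates the per-zone limit checks elsewhere in this file.
 */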
static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	static_branch_enable(&ovs_ct_limit_enabled);

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct nlattr *nla_reply;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct net *net = sock_net(skb->sk);
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
	if (!nla_reply) {
		err = -EMSGSIZE;
		goto exit_err;
	}

	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = ovs_ct_limit_get_zone_limit(
			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
			reply);
		if (err)
			goto exit_err;
	} else {
		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
						      reply);
		if (err)
			goto exit_err;
	}

	nla_nest_end(reply, nla_reply);
	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

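/* Generic netlink plumbing for the OVS_CT_LIMIT family: SET and DEL require
 * CAP_NET_ADMIN, while GET is allowed for unprivileged users.
 */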
static const struct genl_small_ops ct_limit_genl_ops[] = {
	{ .cmd = OVS_CT_LIMIT_CMD_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					   * privilege.
					   */
		.doit = ovs_ct_limit_cmd_set,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					   * privilege.
					   */
		.doit = ovs_ct_limit_cmd_del,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,		  /* OK for unprivileged users. */
		.doit = ovs_ct_limit_cmd_get,
	},
};

static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
	.name		= OVS_CT_LIMIT_MCGROUP,
};

struct genl_family dp_ct_limit_genl_family __ro_after_init = {
	.hdrsize	= sizeof(struct ovs_header),
	.name		= OVS_CT_LIMIT_FAMILY,
	.version	= OVS_CT_LIMIT_VERSION,
	.maxattr	= OVS_CT_LIMIT_ATTR_MAX,
	.policy		= ct_limit_policy,
	.netnsok	= true,
	.parallel_ops	= true,
	.small_ops	= ct_limit_genl_ops,
	.n_small_ops	= ARRAY_SIZE(ct_limit_genl_ops),
	.mcgrps		= &ovs_ct_limit_multicast_group,
	.n_mcgrps	= 1,
	.module		= THIS_MODULE,
};
#endif

int ovs_ct_init(struct net *net)
{
	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		ovs_net->xt_label = false;
		OVS_NLERR(true, "Failed to set connlabel length");
	} else {
		ovs_net->xt_label = true;
	}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	return ovs_ct_limit_init(net, ovs_net);
#else
	return 0;
#endif
}

void ovs_ct_exit(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	ovs_ct_limit_exit(net, ovs_net);
#endif

	if (ovs_net->xt_label)
		nf_connlabels_put(net);
}