// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ?
				      0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
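	 * (The subtraction happens when the counters are read back in
	 * ovs_flow_masks_rebalance().)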
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow the size to be 0 or a power of 2, and it must not
	 * exceed the per-CPU allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held.
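 * Flushes every flow from 'ti' and 'ufid_ti'; each flow is freed with RCU
 * deferral (ovs_flow_free(flow, true)).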
 */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {

			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table.
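	 * The new instance uses the flipped node_ver, so RCU readers still
	 * walking the old version's hlist nodes are unaffected while the
	 * flows are re-linked here.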
	 */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32.
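	 * (Key ranges are rounded to sizeof(long) when they are built, so
	 * the shift below should not drop any bytes.)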
	 */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the
 * mask at the index passed in via '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {

		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

/* mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled: updates to the mask list can leave inconsistent entries in
 * the mask cache.
 * This is a per-CPU cache and it is divided into MC_HASH_SEGS segments.
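 * Each segment indexes the per-CPU entry array with a different slice of
 * the skb hash (the hash is shifted by MC_HASH_SHIFT bits per segment).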
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex,
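	 * so the mask array cannot change underneath us.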
	 */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held.
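 * Removes 'flow' from the key and UFID tables; the flow itself is not
 * freed here, the caller is expected to do that (e.g. via ovs_flow_free()).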
 */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
	       && (a->range.start == b->range.start)
	       && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;
		t = ovsl_dereference(ma->masks[i]);

		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room.
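	 * (Unlike the key table, the UFID table is never periodically
	 * rehashed.)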
	 */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
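 * The BUILD_BUG_ON()s below enforce the long-alignment assumptions that
 * ovs_flow_mask_key() and cmp_key() rely on.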
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}