// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

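/* Allocate a new flow together with its default per-CPU statistics node.
 * The stats node is placed on NUMA node 0 when that node is online
 * (otherwise on any node), and CPU 0 is pre-set in 'cpu_used_mask'.
 */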
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

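/* A mask_array is one allocation: the struct itself, followed by 'max'
 * mask pointers, followed by 'max' u64 zero-baseline counters
 * ('masks_usage_zero_cntr').  The live per-CPU usage counters are a
 * separate percpu allocation of 'max' u64s.
 */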
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

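/* Append 'new' to the mask array under ovs_mutex, growing the array by
 * another MASK_ARRAY_SIZE_MIN slots when it is already full.
 */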
static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow 'size' to be 0 or a power of 2, and make sure it does
	 * not exceed the percpu allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow,
				     bool count)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	if (count)
		table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);

		if (count)
			table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

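/* Free all flows linked into 'ti' (removing them from 'ufid_ti' as well),
 * then release both table instances.  If 'keep_flows' is set the flows
 * were inherited by a newer instance during a rehash, so only the bucket
 * arrays are freed.
 */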
static void table_instance_destroy(struct flow_table *table,
				   struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {

			table_instance_flow_free(table, ti, ufid_ti,
						 flow, false);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking: this function is called from an RCU callback or the
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(table, ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

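/* Flows carry two hlist nodes per table (node[0] and node[1]).  Copying
 * into a new instance links every flow under the opposite 'node_ver', so
 * the old instance remains valid for concurrent RCU readers until it is
 * destroyed with 'keep_flows' set.
 */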
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

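/* Hash only the bytes of 'key' that fall inside 'range'.  Callers pass the
 * range of the mask that was applied to the key, so equal masked keys hash
 * to the same value regardless of bits outside the mask.
 */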
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* The number of hash bytes is a multiple of sizeof(u32). */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table.  It starts with the
 * mask at the index passed in '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

/* The mask_cache maps a flow to its probable mask.  It is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries behind in the cache.
 * The cache is per CPU and is divided into MC_HASH_SEGS segments.  On a
 * hash collision the entry is hashed into the next segment.
 */
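/* With the default of MC_DEFAULT_HASH_ENTRIES (256) entries per CPU and
 * MC_HASH_SHIFT == 8, MC_HASH_SEGS works out to 4: the 32-bit skb_hash is
 * consumed 8 bits at a time, so bits 0-7, 8-15, 16-23 and 24-31 each
 * select one candidate slot.  On a miss, the candidate whose stored
 * skb_hash is smallest (an invalidated slot stores 0) is overwritten.
 */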
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}

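/* Find a flow whose unmasked key matches 'match' exactly, probing every
 * mask in the array.  Only flows identified by key (not by UFID) are
 * candidates here.
 */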
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow, true);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;
		t = ovsl_dereference(ma->masks[i]);

		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

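/* Sort the masks by their observed hit counts and, when the order has
 * changed, publish a reordered copy of the mask array so that
 * flow_lookup() probes the most frequently hit masks first.
 */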
/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the mask array in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}