/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since
	 * further operations on 'dst' use only the contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
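
/* Illustrative sketch (hypothetical caller, not part of this file):
 * building a masked key.  ovs_flow_mask_key() ANDs 'src' with the mask
 * word by word, but only over [mask->range.start, mask->range.end), so
 * wildcarded fields compare equal for every packet:
 *
 *	struct sw_flow_key masked;
 *
 *	ovs_flow_mask_key(&masked, &pkt_key, mask);
 *	// 'masked' now holds (pkt_key & mask->key) inside the range;
 *	// bytes outside the range are deliberately left unset.
 */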

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
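
/* Illustrative note (sketch, not code in this file): a writer that
 * unlinks a flow while RCU readers may still be walking the bucket
 * lists must free it deferred:
 *
 *	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
 *	ovs_flow_free(flow, true);	// call_rcu() -> flow_free()
 *
 * Passing 'deferred = false' is safe only when no reader can hold a
 * reference, e.g. on an error path before the flow was published.
 */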

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, deferred);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}
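
/* Note on the rehash scheme (derived from the code above): each flow has
 * two sets of hash_node links, indexed by node_ver.  A rehash links every
 * flow into the new instance under !old_ver while readers keep traversing
 * the old instance under old_ver, and old->keep_flows tells
 * table_instance_destroy() to free only the old bucket array:
 *
 *	new_ti = table_instance_rehash(ti, ti->n_buckets);
 *	if (new_ti) {
 *		rcu_assign_pointer(table->ti, new_ti);
 *		table_instance_destroy(ti, true);	// buckets only
 *	}
 */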

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)	/* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
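
/* Illustrative sketch (hypothetical fast-path caller): lookups run under
 * the RCU read lock and try each mask in list order; 'n_mask_hit' counts
 * how many masks were probed, which the caller can fold into megaflow
 * statistics:
 *
 *	u32 n_mask_hit;
 *	struct sw_flow *flow;
 *
 *	rcu_read_lock();
 *	flow = ovs_flow_tbl_lookup_stats(tbl, &key, &n_mask_hit);
 *	if (flow)
 *		...		// hit after probing n_mask_hit masks
 *	rcu_read_unlock();
 */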

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_flow_cmp_unmasked_key(flow, match))	/* Found */
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
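
/* Illustrative note (sketch): masks are deduplicated so that many flows
 * share one sw_flow_mask, keeping the per-lookup mask list short.  Two
 * inserts with an identical key/range just bump the refcount:
 *
 *	flow_mask_insert(tbl, flow_a, &m);	// allocated, ref_count == 1
 *	flow_mask_insert(tbl, flow_b, &m);	// shared,    ref_count == 2
 *	flow_mask_remove(tbl, flow_a->mask);	// ref_count back to 1
 */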

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}
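
/* Illustrative sketch (assumed caller, not code in this file): module
 * init is expected to pair these as
 *
 *	err = ovs_flow_init();
 *	if (err)
 *		return err;
 *	...
 *	ovs_flow_exit();	// on a later init failure or module unload
 *
 * so both slab caches exist before any flow is allocated and are
 * destroyed only after every flow has been freed.
 */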