/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
	const long *s = (long *)((u8 *)src + mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* Memory outside of 'mask->range' is not set, since further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
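
/* Illustrative sketch (not part of the datapath): applying a mask before
 * hashing or comparison.  'unmasked' and 'masked' are hypothetical locals;
 * masked_flow_lookup() below does exactly this internally.
 *
 *	struct sw_flow_key masked;
 *
 *	ovs_flow_mask_key(&masked, &unmasked, mask);
 *
 * 'masked' now holds (unmasked & mask->key) over the bytes in
 * [mask->range.start, mask->range.end); bytes outside that range are
 * left uninitialized, so callers must only read within the range.
 */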

struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
	struct sw_flow *flow;
	int cpu;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;

	flow->stats.is_percpu = percpu_stats;

	if (!percpu_stats) {
		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
		if (!flow->stats.stat)
			goto err;

		spin_lock_init(&flow->stats.stat->lock);
	} else {
		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
		if (!flow->stats.cpu_stats)
			goto err;

		for_each_possible_cpu(cpu) {
			struct flow_stats *cpu_stats;

			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
			spin_lock_init(&cpu_stats->lock);
		}
	}
	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	if (flow->stats.is_percpu)
		free_percpu(flow->stats.cpu_stats);
	else
		kfree(flow->stats.stat);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			kfree_rcu(mask, rcu);
		else
			kfree(mask);
	}
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	int i;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

skip_flows:
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
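
/* Note: find_bucket() below masks the hash with (n_buckets - 1), so a
 * table instance must always be sized to a power of two.  TBL_MIN_BUCKETS
 * (1024) is a power of two and table_instance_expand() doubles the size,
 * so the invariant holds.  A hypothetical caller, error handling elided:
 *
 *	struct table_instance *ti = table_instance_alloc(TBL_MIN_BUCKETS);
 *
 *	if (!ti)
 *		return -ENOMEM;
 */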

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	if (!ti)
		return;

	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
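
/* A sketch of why each flow carries two hash_node links: rehashing links
 * every flow into the new instance under the complementary list index,
 * while concurrent RCU readers keep traversing the old instance under
 * the old index, so neither side ever sees a half-built list.  Roughly:
 *
 *	old->node_ver == 0: readers walk flow->hash_node[0];
 *	new->node_ver == 1: the copy links flow->hash_node[1];
 *
 * rcu_assign_pointer() then publishes the new instance, and the old one
 * is freed after a grace period with keep_flows set, so the flows
 * themselves survive the swap.
 */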

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	u32 *hash_key = (u32 *)((u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hashed bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}
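
/* Worked example (hypothetical keys): with a mask that wildcards
 * everything but the IPv4 destination, two packets differing only in
 * source address collapse onto the same masked key:
 *
 *	ovs_flow_mask_key(&k1, &pkt1_key, mask);
 *	ovs_flow_mask_key(&k2, &pkt2_key, mask);
 *
 * k1 equals k2 over mask->range, so both hash to the same bucket and
 * flow_cmp_masked_key() matches the same flow.  Flows installed with a
 * different mask never match here, because of the flow->mask == mask
 * check above.
 */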

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 0;

	return mask;
}

static void mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	u8 *a_ = (u8 *)&a->key + a->range.start;
	u8 *b_ = (u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	}

	mask_add_ref(mask);
	flow->mask = mask;
	return 0;
}

int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}
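
/* Usage sketch for the insert path (illustrative; the real caller lives
 * in datapath.c and holds ovs_mutex, which ovsl_dereference() asserts):
 *
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	if (err)
 *		goto err_free_flow;
 *
 * 'mask' here acts as a template: if an equal mask is already on the
 * list it is reused and reference-counted, otherwise a copy of the key
 * and range is allocated and published with list_add_rcu().
 */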

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
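
/* Usage sketch (hypothetical module init/exit pairing; the real calls
 * are made from the datapath module's init and exit paths):
 *
 *	static int __init example_init(void)
 *	{
 *		int err = ovs_flow_init();
 *
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		...
 *		ovs_flow_exit();
 *	}
 */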