flow_table.c — diff of blob b637e4988c2d689bb43f943a5af0e684a4981159 against blob 618ed0c805b64c820279f50732110ab873221c3b
1/*
2 * Copyright (c) 2007-2013 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 114 unchanged lines hidden (view full) ---

123
124static void rcu_free_flow_callback(struct rcu_head *rcu)
125{
126 struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
127
128 flow_free(flow);
129}
130
1/*
2 * Copyright (c) 2007-2013 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 114 unchanged lines hidden (view full) ---

123
124static void rcu_free_flow_callback(struct rcu_head *rcu)
125{
126 struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
127
128 flow_free(flow);
129}
130
131static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
132{
133 struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
134
135 kfree(mask);
136}
137
138static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
139{
140 if (!mask)
141 return;
142
143 BUG_ON(!mask->ref_count);
144 mask->ref_count--;
145
146 if (!mask->ref_count) {
147 list_del_rcu(&mask->list);
148 if (deferred)
149 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
150 else
151 kfree(mask);
152 }
153}
154
131void ovs_flow_free(struct sw_flow *flow, bool deferred)
132{
133 if (!flow)
134 return;
135
155void ovs_flow_free(struct sw_flow *flow, bool deferred)
156{
157 if (!flow)
158 return;
159
136 ovs_sw_flow_mask_del_ref(flow->mask, deferred);
160 flow_mask_del_ref(flow->mask, deferred);
137
138 if (deferred)
139 call_rcu(&flow->rcu, rcu_free_flow_callback);
140 else
141 flow_free(flow);
142}
143
144static void free_buckets(struct flex_array *buckets)

--- 75 unchanged lines hidden (view full) ---

220 return;
221
222 if (deferred)
223 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
224 else
225 __table_instance_destroy(ti);
226}
227
161
162 if (deferred)
163 call_rcu(&flow->rcu, rcu_free_flow_callback);
164 else
165 flow_free(flow);
166}
167
168static void free_buckets(struct flex_array *buckets)

--- 75 unchanged lines hidden (view full) ---

244 return;
245
246 if (deferred)
247 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
248 else
249 __table_instance_destroy(ti);
250}
251
228void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
252void ovs_flow_tbl_destroy(struct flow_table *table)
229{
230 struct table_instance *ti = ovsl_dereference(table->ti);
231
253{
254 struct table_instance *ti = ovsl_dereference(table->ti);
255
232 table_instance_destroy(ti, deferred);
256 table_instance_destroy(ti, false);
233}
234
235struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
236 u32 *bucket, u32 *last)
237{
238 struct sw_flow *flow;
239 struct hlist_head *head;
240 int ver;

--- 58 unchanged lines hidden (view full) ---

299
300static struct table_instance *table_instance_rehash(struct table_instance *ti,
301 int n_buckets)
302{
303 struct table_instance *new_ti;
304
305 new_ti = table_instance_alloc(n_buckets);
306 if (!new_ti)
257}
258
259struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
260 u32 *bucket, u32 *last)
261{
262 struct sw_flow *flow;
263 struct hlist_head *head;
264 int ver;

--- 58 unchanged lines hidden (view full) ---

323
324static struct table_instance *table_instance_rehash(struct table_instance *ti,
325 int n_buckets)
326{
327 struct table_instance *new_ti;
328
329 new_ti = table_instance_alloc(n_buckets);
330 if (!new_ti)
307 return ERR_PTR(-ENOMEM);
331 return NULL;
308
309 flow_table_copy_flows(ti, new_ti);
310
311 return new_ti;
312}
313
314int ovs_flow_tbl_flush(struct flow_table *flow_table)
315{

--- 104 unchanged lines hidden (view full) ---

420 return NULL;
421}
422
423static struct table_instance *table_instance_expand(struct table_instance *ti)
424{
425 return table_instance_rehash(ti, ti->n_buckets * 2);
426}
427
332
333 flow_table_copy_flows(ti, new_ti);
334
335 return new_ti;
336}
337
338int ovs_flow_tbl_flush(struct flow_table *flow_table)
339{

--- 104 unchanged lines hidden (view full) ---

444 return NULL;
445}
446
447static struct table_instance *table_instance_expand(struct table_instance *ti)
448{
449 return table_instance_rehash(ti, ti->n_buckets * 2);
450}
451
428void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
429{
430 struct table_instance *ti = NULL;
431 struct table_instance *new_ti = NULL;
432
433 ti = ovsl_dereference(table->ti);
434
435 /* Expand table, if necessary, to make room. */
436 if (table->count > ti->n_buckets)
437 new_ti = table_instance_expand(ti);
438 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
439 new_ti = table_instance_rehash(ti, ti->n_buckets);
440
441 if (new_ti && !IS_ERR(new_ti)) {
442 rcu_assign_pointer(table->ti, new_ti);
443 ovs_flow_tbl_destroy(table, true);
444 ti = ovsl_dereference(table->ti);
445 table->last_rehash = jiffies;
446 }
447
448 flow->hash = flow_hash(&flow->key, flow->mask->range.start,
449 flow->mask->range.end);
450 table_instance_insert(ti, flow);
451 table->count++;
452}
453
454void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
455{
456 struct table_instance *ti = ovsl_dereference(table->ti);
457
458 BUG_ON(table->count == 0);
459 hlist_del_rcu(&flow->hash_node[ti->node_ver]);
460 table->count--;
461}
462
452void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
453{
454 struct table_instance *ti = ovsl_dereference(table->ti);
455
456 BUG_ON(table->count == 0);
457 hlist_del_rcu(&flow->hash_node[ti->node_ver]);
458 table->count--;
459}
460
463struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
461static struct sw_flow_mask *mask_alloc(void)
464{
465 struct sw_flow_mask *mask;
466
467 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
468 if (mask)
469 mask->ref_count = 0;
470
471 return mask;
472}
473
462{
463 struct sw_flow_mask *mask;
464
465 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
466 if (mask)
467 mask->ref_count = 0;
468
469 return mask;
470}
471
474void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
472static void mask_add_ref(struct sw_flow_mask *mask)
475{
476 mask->ref_count++;
477}
478
473{
474 mask->ref_count++;
475}
476
479static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
480{
481 struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
482
483 kfree(mask);
484}
485
486void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
487{
488 if (!mask)
489 return;
490
491 BUG_ON(!mask->ref_count);
492 mask->ref_count--;
493
494 if (!mask->ref_count) {
495 list_del_rcu(&mask->list);
496 if (deferred)
497 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
498 else
499 kfree(mask);
500 }
501}
502
503static bool mask_equal(const struct sw_flow_mask *a,
504 const struct sw_flow_mask *b)
505{
506 u8 *a_ = (u8 *)&a->key + a->range.start;
507 u8 *b_ = (u8 *)&b->key + b->range.start;
508
509 return (a->range.end == b->range.end)
510 && (a->range.start == b->range.start)
511 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
512}
513
477static bool mask_equal(const struct sw_flow_mask *a,
478 const struct sw_flow_mask *b)
479{
480 u8 *a_ = (u8 *)&a->key + a->range.start;
481 u8 *b_ = (u8 *)&b->key + b->range.start;
482
483 return (a->range.end == b->range.end)
484 && (a->range.start == b->range.start)
485 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
486}
487
514struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
488static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
515 const struct sw_flow_mask *mask)
516{
517 struct list_head *ml;
518
519 list_for_each(ml, &tbl->mask_list) {
520 struct sw_flow_mask *m;
521 m = container_of(ml, struct sw_flow_mask, list);
522 if (mask_equal(mask, m))
523 return m;
524 }
525
526 return NULL;
527}
528
529/**
530 * add a new mask into the mask list.
531 * The caller needs to make sure that 'mask' is not the same
532 * as any masks that are already on the list.
533 */
489 const struct sw_flow_mask *mask)
490{
491 struct list_head *ml;
492
493 list_for_each(ml, &tbl->mask_list) {
494 struct sw_flow_mask *m;
495 m = container_of(ml, struct sw_flow_mask, list);
496 if (mask_equal(mask, m))
497 return m;
498 }
499
500 return NULL;
501}
502
503/**
504 * add a new mask into the mask list.
505 * The caller needs to make sure that 'mask' is not the same
506 * as any masks that are already on the list.
507 */
534void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
508static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
509 struct sw_flow_mask *new)
535{
510{
536 list_add_rcu(&mask->list, &tbl->mask_list);
511 struct sw_flow_mask *mask;
512 mask = flow_mask_find(tbl, new);
513 if (!mask) {
514 /* Allocate a new mask if none exsits. */
515 mask = mask_alloc();
516 if (!mask)
517 return -ENOMEM;
518 mask->key = new->key;
519 mask->range = new->range;
520 list_add_rcu(&mask->list, &tbl->mask_list);
521 }
522
523 mask_add_ref(mask);
524 flow->mask = mask;
525 return 0;
537}
538
526}
527
528int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
529 struct sw_flow_mask *mask)
530{
531 struct table_instance *new_ti = NULL;
532 struct table_instance *ti;
533 int err;
534
535 err = flow_mask_insert(table, flow, mask);
536 if (err)
537 return err;
538
539 flow->hash = flow_hash(&flow->key, flow->mask->range.start,
540 flow->mask->range.end);
541 ti = ovsl_dereference(table->ti);
542 table_instance_insert(ti, flow);
543 table->count++;
544
545 /* Expand table, if necessary, to make room. */
546 if (table->count > ti->n_buckets)
547 new_ti = table_instance_expand(ti);
548 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
549 new_ti = table_instance_rehash(ti, ti->n_buckets);
550
551 if (new_ti) {
552 rcu_assign_pointer(table->ti, new_ti);
553 table_instance_destroy(ti, true);
554 table->last_rehash = jiffies;
555 }
556 return 0;
557}
558
539/* Initializes the flow module.
540 * Returns zero if successful or a negative error code. */
541int ovs_flow_init(void)
542{
543 BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
544 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
545
546 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,

--- 12 unchanged lines hidden ---
559/* Initializes the flow module.
560 * Returns zero if successful or a negative error code. */
561int ovs_flow_init(void)
562{
563 BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
564 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
565
566 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,

--- 12 unchanged lines hidden ---