flow_table.c: diff between commit 23dabf88abb48a866fdb19ee08ebcf1ddd9b1840 (before) and commit 63e7959c4b9bd6f791061c460a22d9ee32ae2240 (after)
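Overview note: this diff replaces the flow's per-CPU statistics (one block per
possible CPU via alloc_percpu) with per-NUMA-node statistics allocated lazily
from a dedicated slab cache. The per-node pointers live in a flexible array at
the tail of struct sw_flow, which is defined in flow.h and therefore not part
of this diff. A minimal sketch of the layout the hunks below appear to assume
(member names taken from the code in this diff, everything else illustrative):

/* Sketch only; the authoritative definition is in flow.h. */
struct sw_flow {
        struct rcu_head rcu;              /* used by rcu_free_flow_callback() */
        int stats_last_writer;            /* NUMA node id of the last writer on
                                           * stats[0], or NUMA_NO_NODE */
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
        /* ... other members omitted ... */
        struct flow_stats __rcu *stats[]; /* one slot per possible NUMA node;
                                           * stats[0] is allocated up front,
                                           * the rest on demand; the array's
                                           * space comes from the enlarged
                                           * flow_cache object size set up in
                                           * ovs_flow_init() below */
};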
 /*
  * Copyright (c) 2007-2013 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but

--- 34 unchanged lines hidden ---

 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>

 #define TBL_MIN_BUCKETS         1024
 #define REHASH_INTERVAL         (10 * 60 * HZ)

 static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
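Note: flow_stats_cache is deliberately non-static (and __read_mostly, since it
is written once at init and read on every allocation) because the stats-update
path in flow.c allocates node-local blocks from the same cache. The matching
declaration would live in a shared header; a one-line sketch, assuming
flow_table.h is where it belongs:

/* Assumed companion declaration, e.g. in flow_table.h: */
extern struct kmem_cache *flow_stats_cache;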
 
 static u16 range_n_bytes(const struct sw_flow_key_range *range)
 {
         return range->end - range->start;
 }

 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                        const struct sw_flow_mask *mask)

--- 11 unchanged lines hidden ---

          */
         for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                 *d++ = *s++ & *m++;
 }

 struct sw_flow *ovs_flow_alloc(void)
 {
         struct sw_flow *flow;
-        int cpu;
+        struct flow_stats *stats;
+        int node;

         flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
         if (!flow)
                 return ERR_PTR(-ENOMEM);

         flow->sf_acts = NULL;
         flow->mask = NULL;
+        flow->stats_last_writer = NUMA_NO_NODE;

-        flow->stats = alloc_percpu(struct flow_stats);
-        if (!flow->stats)
+        /* Initialize the default stat node. */
+        stats = kmem_cache_alloc_node(flow_stats_cache,
+                                      GFP_KERNEL | __GFP_ZERO, 0);
+        if (!stats)
                 goto err;

-        for_each_possible_cpu(cpu) {
-                struct flow_stats *cpu_stats;
-
-                cpu_stats = per_cpu_ptr(flow->stats, cpu);
-                spin_lock_init(&cpu_stats->lock);
-        }
+        spin_lock_init(&stats->lock);
+
+        RCU_INIT_POINTER(flow->stats[0], stats);
+
+        for_each_node(node)
+                if (node != 0)
+                        RCU_INIT_POINTER(flow->stats[node], NULL);
+
         return flow;
 err:
         kmem_cache_free(flow_cache, flow);
         return ERR_PTR(-ENOMEM);
 }

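With this allocation scheme, a packet-path writer picks the stats block for
its own NUMA node and falls back to the preallocated stats[0] when no
node-local block exists yet; stats_last_writer records which node last wrote
the shared block so the update path can decide when a node-local block is
worth allocating. The real update logic is the companion flow.c change, which
this diff does not include; the following is only a simplified sketch, with
the flow_stats field names (used, packet_count, byte_count) assumed from
flow.h of the same tree:

/* Simplified sketch of a per-node stats writer; not the verbatim flow.c
 * code. */
static void flow_stats_touch(struct sw_flow *flow, unsigned int len)
{
        int node = numa_node_id();
        struct flow_stats *stats;

        stats = rcu_dereference(flow->stats[node]);
        if (likely(stats)) {
                spin_lock(&stats->lock);
        } else {
                /* Fall back to the preallocated block on node 0.  A full
                 * implementation could allocate a node-local block here
                 * with kmem_cache_alloc_node(flow_stats_cache,
                 * GFP_ATOMIC | __GFP_ZERO, node). */
                stats = rcu_dereference(flow->stats[0]);
                spin_lock(&stats->lock);
                flow->stats_last_writer = node; /* last writer on stats[0] */
        }
        stats->used = jiffies;
        stats->packet_count++;
        stats->byte_count += len;
        spin_unlock(&stats->lock);
}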
 int ovs_flow_tbl_count(struct flow_table *table)
 {

--- 20 unchanged lines hidden ---

                 INIT_HLIST_HEAD((struct hlist_head *)
                                 flex_array_get(buckets, i));

         return buckets;
 }

 static void flow_free(struct sw_flow *flow)
 {
+        int node;
+
         kfree((struct sf_flow_acts __force *)flow->sf_acts);
-        free_percpu(flow->stats);
+        for_each_node(node)
+                if (flow->stats[node])
+                        kmem_cache_free(flow_stats_cache,
+                                        (struct flow_stats __force *)flow->stats[node]);
         kmem_cache_free(flow_cache, flow);
 }

 static void rcu_free_flow_callback(struct rcu_head *rcu)
 {
         struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

         flow_free(flow);

--- 438 unchanged lines hidden ---

 /* Initializes the flow module.
  * Returns zero if successful or a negative error code. */
 int ovs_flow_init(void)
 {
         BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
         BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

-        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-                                       0, NULL);
+        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+                                       + (num_possible_nodes()
+                                          * sizeof(struct flow_stats *)),
+                                       0, 0, NULL);
         if (flow_cache == NULL)
                 return -ENOMEM;

+        flow_stats_cache
+                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+                                    0, SLAB_HWCACHE_ALIGN, NULL);
+        if (flow_stats_cache == NULL) {
+                kmem_cache_destroy(flow_cache);
+                flow_cache = NULL;
+                return -ENOMEM;
+        }
+
         return 0;
 }

 /* Uninitializes the flow module. */
 void ovs_flow_exit(void)
 {
+        kmem_cache_destroy(flow_stats_cache);
         kmem_cache_destroy(flow_cache);
 }
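The enlarged flow_cache object size in ovs_flow_init() is what makes the
flow->stats[node] indexing above legal: every object carved from the cache
carries num_possible_nodes() pointer slots behind the fixed part of
struct sw_flow. A standalone userspace analogue of the same pattern, with
hypothetical names (plain C99, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-ins for the kernel structures; names are illustrative. */
struct stats {
        unsigned long long packets;
        unsigned long long bytes;
};

struct flow {
        int stats_last_writer;  /* -1 plays the role of NUMA_NO_NODE */
        struct stats *stats[];  /* flexible array, one slot per "node" */
};

/* Allocate one flow with room for nr_nodes pointer slots, mirroring the
 * sizeof(struct sw_flow) + num_possible_nodes() * sizeof(struct flow_stats *)
 * computation in ovs_flow_init(). */
static struct flow *flow_alloc(int nr_nodes)
{
        struct flow *f = malloc(sizeof(*f) + nr_nodes * sizeof(f->stats[0]));

        if (!f)
                return NULL;
        f->stats_last_writer = -1;
        memset(f->stats, 0, nr_nodes * sizeof(f->stats[0]));
        f->stats[0] = calloc(1, sizeof(struct stats)); /* default "node" 0 */
        return f;
}

int main(void)
{
        struct flow *f = flow_alloc(4);

        if (!f || !f->stats[0])
                return 1;
        f->stats[0]->packets++;     /* write via the preallocated slot */
        printf("slot 0: %p, slot 3: %p\n",
               (void *)f->stats[0], (void *)f->stats[3]);
        free(f->stats[0]);
        free(f);
        return 0;
}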