--- flow.c (23dabf88abb48a866fdb19ee08ebcf1ddd9b1840)
+++ flow.c (63e7959c4b9bd6f791061c460a22d9ee32ae2240)
 /*
  * Copyright (c) 2007-2013 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
--- 51 unchanged lines hidden ---
 }

 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

 void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
 	struct flow_stats *stats;
 	__be16 tcp_flags = 0;
+	int node = numa_node_id();

-	stats = this_cpu_ptr(flow->stats);
+	stats = rcu_dereference(flow->stats[node]);

 	if ((flow->key.eth.type == htons(ETH_P_IP) ||
 	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
 	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
 	    flow->key.ip.proto == IPPROTO_TCP &&
 	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 	}

-	spin_lock(&stats->lock);
+	/* Check if already have node-specific stats. */
+	if (likely(stats)) {
+		spin_lock(&stats->lock);
+		/* Mark if we write on the pre-allocated stats. */
+		if (node == 0 && unlikely(flow->stats_last_writer != node))
+			flow->stats_last_writer = node;
+	} else {
+		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
+		spin_lock(&stats->lock);
+
+		/* If the current NUMA-node is the only writer on the
+		 * pre-allocated stats keep using them.
+		 */
+		if (unlikely(flow->stats_last_writer != node)) {
+			/* A previous locker may have already allocated the
+			 * stats, so we need to check again.  If node-specific
+			 * stats were already allocated, we update the pre-
+			 * allocated stats as we have already locked them.
+			 */
+			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
+			    && likely(!rcu_dereference(flow->stats[node]))) {
+				/* Try to allocate node-specific stats. */
+				struct flow_stats *new_stats;
+
+				new_stats =
+					kmem_cache_alloc_node(flow_stats_cache,
+							      GFP_THISNODE |
+							      __GFP_NOMEMALLOC,
+							      node);
+				if (likely(new_stats)) {
+					new_stats->used = jiffies;
+					new_stats->packet_count = 1;
+					new_stats->byte_count = skb->len;
+					new_stats->tcp_flags = tcp_flags;
+					spin_lock_init(&new_stats->lock);
+
+					rcu_assign_pointer(flow->stats[node],
+							   new_stats);
+					goto unlock;
+				}
+			}
+			flow->stats_last_writer = node;
+		}
+	}
+
 	stats->used = jiffies;
 	stats->packet_count++;
 	stats->byte_count += skb->len;
 	stats->tcp_flags |= tcp_flags;
+unlock:
 	spin_unlock(&stats->lock);
 }

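This hunk reads more easily next to the companion flow.h change, which replaces the per-CPU stats pointer with a per-NUMA-node pointer array. The following is a rough sketch of the layout the new code assumes, reconstructed from the accesses above (flow->stats[node], flow->stats_last_writer, the pre-allocated stats[0]); it is not the literal header diff:

	/* Sketch: per-node stats layout implied by the diff above. */
	struct flow_stats {
		u64 packet_count;	/* Number of packets matched. */
		u64 byte_count;		/* Number of bytes matched. */
		unsigned long used;	/* Last used time (in jiffies). */
		spinlock_t lock;	/* Lock for atomic stats update. */
		__be16 tcp_flags;	/* Union of seen TCP flags. */
	};

	struct sw_flow {
		struct rcu_head rcu;
		struct hlist_node hash_node[2];
		u32 hash;
		int stats_last_writer;	/* NUMA node id of the last writer on
					 * 'stats[0]'; NUMA_NO_NODE until the
					 * first update. */
		struct sw_flow_key key;
		struct sw_flow_key unmasked_key;
		struct sw_flow_mask *mask;
		struct sw_flow_actions __rcu *sf_acts;
		struct flow_stats __rcu *stats[]; /* One entry per NUMA node;
						   * only stats[0] is always
						   * allocated, the rest start
						   * NULL and are filled in
						   * lazily under stats[0]'s
						   * lock. */
	};

The flexible array at the end keeps the common single-node case compact: the flow cache allocates sizeof(struct sw_flow) plus one pointer per possible node, and remote nodes only pay for a flow_stats object once they actually update the flow.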
-static void stats_read(struct flow_stats *stats,
-		       struct ovs_flow_stats *ovs_stats,
-		       unsigned long *used, __be16 *tcp_flags)
-{
-	spin_lock(&stats->lock);
-	if (!*used || time_after(stats->used, *used))
-		*used = stats->used;
-	*tcp_flags |= stats->tcp_flags;
-	ovs_stats->n_packets += stats->packet_count;
-	ovs_stats->n_bytes += stats->byte_count;
-	spin_unlock(&stats->lock);
-}
-
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
 			unsigned long *used, __be16 *tcp_flags)
 {
-	int cpu;
+	int node;

 	*used = 0;
 	*tcp_flags = 0;
 	memset(ovs_stats, 0, sizeof(*ovs_stats));

-	local_bh_disable();
-
-	for_each_possible_cpu(cpu) {
-		struct flow_stats *stats;
-
-		stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-		stats_read(stats, ovs_stats, used, tcp_flags);
-	}
-
-	local_bh_enable();
+	for_each_node(node) {
+		struct flow_stats *stats = rcu_dereference(flow->stats[node]);
+
+		if (stats) {
+			/* Local CPU may write on non-local stats, so we must
+			 * block bottom-halves here.
+			 */
+			spin_lock_bh(&stats->lock);
+			if (!*used || time_after(stats->used, *used))
+				*used = stats->used;
+			*tcp_flags |= stats->tcp_flags;
+			ovs_stats->n_packets += stats->packet_count;
+			ovs_stats->n_bytes += stats->byte_count;
+			spin_unlock_bh(&stats->lock);
+		}
+	}
 }

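The reader can skip NULL entries, and the update path can take stats[0] for granted, because the flow allocator pre-allocates that entry and starts stats_last_writer at NUMA_NO_NODE. A minimal sketch of that allocation side, assuming the usual flow_cache/flow_stats_cache kmem caches (reconstructed; details may differ from the real ovs_flow_alloc() in flow_table.c):

	/* Sketch: allocation side that establishes the stats[0] invariant. */
	struct sw_flow *ovs_flow_alloc(void)
	{
		struct sw_flow *flow;
		struct flow_stats *stats;
		int node;

		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (!flow)
			return ERR_PTR(-ENOMEM);

		flow->sf_acts = NULL;
		flow->mask = NULL;
		/* No writer yet; first updater claims stats[0]. */
		flow->stats_last_writer = NUMA_NO_NODE;

		/* Pre-allocate the default stats node on node 0. */
		stats = kmem_cache_alloc_node(flow_stats_cache,
					      GFP_KERNEL | __GFP_ZERO, 0);
		if (!stats)
			goto err;
		spin_lock_init(&stats->lock);

		RCU_INIT_POINTER(flow->stats[0], stats);
		for_each_node(node)
			if (node != 0)
				RCU_INIT_POINTER(flow->stats[node], NULL);

		return flow;
	err:
		kmem_cache_free(flow_cache, flow);
		return ERR_PTR(-ENOMEM);
	}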
-static void stats_reset(struct flow_stats *stats)
-{
-	spin_lock(&stats->lock);
-	stats->used = 0;
-	stats->packet_count = 0;
-	stats->byte_count = 0;
-	stats->tcp_flags = 0;
-	spin_unlock(&stats->lock);
-}
-
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-	int cpu;
+	int node;

-	local_bh_disable();
-
-	for_each_possible_cpu(cpu)
-		stats_reset(per_cpu_ptr(flow->stats, cpu));
-
-	local_bh_enable();
+	for_each_node(node) {
+		struct flow_stats *stats = rcu_dereference(flow->stats[node]);
+
+		if (stats) {
+			spin_lock_bh(&stats->lock);
+			stats->used = 0;
+			stats->packet_count = 0;
+			stats->byte_count = 0;
+			stats->tcp_flags = 0;
+			spin_unlock_bh(&stats->lock);
+		}
+	}
 }

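Because node-specific stats are allocated lazily, teardown has to walk the same array and free whatever was actually populated. A hypothetical sketch of the matching free path (the real flow_free() lives in flow_table.c and may differ in detail):

	/* Sketch: free any per-node stats that were lazily allocated. */
	static void flow_free(struct sw_flow *flow)
	{
		int node;

		kfree((struct sw_flow_actions __force *)flow->sf_acts);
		for_each_node(node)
			if (flow->stats[node])
				kmem_cache_free(flow_stats_cache,
						(struct flow_stats __force *)
						flow->stats[node]);
		kmem_cache_free(flow_cache, flow);
	}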
 static int check_header(struct sk_buff *skb, int len)
 {
 	if (unlikely(skb->len < len))
 		return -EINVAL;
 	if (unlikely(!pskb_may_pull(skb, len)))
 		return -ENOMEM;
--- 437 unchanged lines hidden ---