/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/dcache.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>

#include "vport.h"
#include "vport-internal_dev.h"

/* List of statically compiled vport implementations.  Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
	&ovs_netdev_vport_ops,
	&ovs_internal_vport_ops,
};

/* Protected by RCU read lock for reading, RTNL lock for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(const char *name)
{
	unsigned int hash = full_name_hash(name, strlen(name));

	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @name: name of port to find
 *
 * Must be called with RTNL or RCU read lock.
 */
struct vport *ovs_vport_locate(const char *name)
{
	struct hlist_head *bucket = hash_bucket(name);
	struct vport *vport;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
		if (!strcmp(name, vport->ops->get_name(vport)))
			return vport;

	return NULL;
}
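/* Example (illustrative sketch, not compiled as part of this file): looking
 * up a port by name from a hypothetical kernel caller.  Holding the RCU read
 * lock satisfies the locking rule documented above; the returned pointer is
 * only guaranteed valid inside the critical section unless RTNL is held.
 *
 *	struct vport *vport;
 *
 *	rcu_read_lock();
 *	vport = ovs_vport_locate("eth0");
 *	if (vport)
 *		pr_info("found port %d\n", vport->port_no);
 *	rcu_read_unlock();
 */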
/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: Information about the new vport.
 *
 * Allocate and initialize a new vport defined by @ops.  The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv().  vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->upcall_pid = parms->upcall_pid;
	vport->ops = ops;

	vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
	if (!vport->percpu_stats) {
		kfree(vport);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&vport->stats_lock);

	return vport;
}

/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	free_percpu(vport->percpu_stats);
	kfree(vport);
}

/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  RTNL lock must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport *vport;
	int err = 0;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
		if (vport_ops_list[i]->type == parms->type) {
			vport = vport_ops_list[i]->create(parms);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			hlist_add_head_rcu(&vport->hash_node,
					   hash_bucket(vport->ops->get_name(vport)));
			return vport;
		}
	}

	/* No statically compiled implementation handles this vport type. */
	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}

/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type).  RTNL lock must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	ASSERT_RTNL();

	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  RTNL lock must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_RTNL();

	hlist_del_rcu(&vport->hash_node);

	vport->ops->destroy(vport);
}
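/* Example (illustrative sketch, not compiled as part of this file): how a
 * hypothetical kernel caller might create and later remove a netdev-backed
 * port.  The variables dp, port_no, and upcall_pid are assumptions standing
 * in for the caller's own state; RTNL must be held around both calls, as
 * documented above.
 *
 *	struct vport_parms parms = {
 *		.name = "eth0",
 *		.type = OVS_VPORT_TYPE_NETDEV,
 *		.options = NULL,
 *		.dp = dp,
 *		.port_no = port_no,
 *		.upcall_pid = upcall_pid,
 *	};
 *	struct vport *vport;
 *
 *	rtnl_lock();
 *	vport = ovs_vport_add(&parms);
 *	if (!IS_ERR(vport))
 *		ovs_vport_del(vport);
 *	rtnl_unlock();
 */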
/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with RTNL lock or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));

	/* We potentially have two sources of stats that need to be combined:
	 * those we have collected (split into err_stats and percpu_stats)
	 * from set_stats(), and device error stats from netdev->get_stats()
	 * (for errors that happen downstream and therefore aren't reported
	 * through our vport_record_error() function).
	 * Stats from the first source are reported by OVS
	 * (OVS_VPORT_ATTR_STATS); netdev stats can be read directly via
	 * netlink or ioctl.
	 */

	spin_lock_bh(&vport->stats_lock);

	stats->rx_errors = vport->err_stats.rx_errors;
	stats->tx_errors = vport->err_stats.tx_errors;
	stats->tx_dropped = vport->err_stats.tx_dropped;
	stats->rx_dropped = vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);

	for_each_possible_cpu(i) {
		const struct vport_percpu_stats *percpu_stats;
		struct vport_percpu_stats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		/* Snapshot this CPU's counters; retry if a writer updated
		 * them while we were copying. */
		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}
}

/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred.  If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with RTNL lock or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	if (vport->ops->get_options) {
		int err = vport->ops->get_options(vport, skb);
		if (err) {
			nla_nest_cancel(skb, nla);
			return err;
		}
	}

	nla_nest_end(skb, nla);
	return 0;
}
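/* Example (illustrative sketch, not compiled as part of this file): a
 * hypothetical dump path combining the two helpers above.  Because
 * ovs_vport_get_options() leaves @skb unmodified on failure, -EMSGSIZE can
 * be handled by simply retrying with a larger buffer.
 *
 *	struct ovs_vport_stats stats;
 *	int err;
 *
 *	rcu_read_lock();
 *	ovs_vport_get_stats(vport, &stats);
 *	err = ovs_vport_get_options(vport, skb);
 *	rcu_read_unlock();
 *	if (err == -EMSGSIZE)
 *		...allocate a bigger skb and try again...
 */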
/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.  The caller must have already
 * called compute_ip_summed() to initialize the checksumming fields.
 */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{
	struct vport_percpu_stats *stats;

	stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

	u64_stats_update_begin(&stats->sync);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->sync);

	ovs_dp_process_received_packet(vport, skb);
}

/**
 * ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent.  Either RTNL
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent)) {
		struct vport_percpu_stats *stats;

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());

		u64_stats_update_begin(&stats->sync);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->sync);
	}
	return sent;
}

/**
 * ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer, indicate that an error of the given
 * type has occurred.
 */
void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
	spin_lock(&vport->stats_lock);

	switch (err_type) {
	case VPORT_E_RX_DROPPED:
		vport->err_stats.rx_dropped++;
		break;

	case VPORT_E_RX_ERROR:
		vport->err_stats.rx_errors++;
		break;

	case VPORT_E_TX_DROPPED:
		vport->err_stats.tx_dropped++;
		break;

	case VPORT_E_TX_ERROR:
		vport->err_stats.tx_errors++;
		break;
	}

	spin_unlock(&vport->stats_lock);
}
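/* Example (illustrative sketch, not compiled as part of this file): how a
 * hypothetical vport implementation's transmit callback might pair
 * ovs_vport_send()'s byte accounting with ovs_vport_record_error().
 * my_dev_xmit() is an assumed helper that consumes the skb and returns
 * nonzero on failure; returning 0 tells ovs_vport_send() that nothing was
 * transmitted, so no tx stats are recorded for the failed packet.
 *
 *	static int my_vport_send(struct vport *vport, struct sk_buff *skb)
 *	{
 *		int len = skb->len;
 *
 *		if (my_dev_xmit(skb)) {
 *			ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
 *			return 0;
 *		}
 *		return len;
 *	}
 */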