xref: /openbmc/linux/net/openvswitch/vport.c (revision 97da55fc)
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

/* List of statically compiled vport implementations.  Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
	&ovs_netdev_vport_ops,
	&ovs_internal_vport_ops,
};

/* Protected by RCU read lock for reading, RTNL lock for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 *	ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

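/* Illustrative only (not compiled): a minimal sketch of how a module init
 * path is expected to pair ovs_vport_init() with ovs_vport_exit().  The
 * function name below is hypothetical; in this tree the real caller lives
 * in datapath.c.
 */
#if 0
static int __init example_module_init(void)
{
	int err;

	err = ovs_vport_init();
	if (err)
		return err;

	/* ... register the rest of the datapath; if that fails, call
	 * ovs_vport_exit() before returning the error. */
	return 0;
}
#endif
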
/**
 *	ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

/**
 *	ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace in which to look up the port
 * @name: name of port to find
 *
 * Must be called with RTNL or RCU read lock.
 */
struct vport *ovs_vport_locate(struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (!strcmp(name, vport->ops->get_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}

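/* Illustrative only (not compiled): a hypothetical reader checking whether
 * a port exists by name.  The lookup and any use of the returned vport must
 * happen under rcu_read_lock() (or with RTNL held).
 */
#if 0
static bool example_port_exists(struct net *net, const char *name)
{
	bool found;

	rcu_read_lock();
	found = ovs_vport_locate(net, name) != NULL;
	rcu_read_unlock();

	return found;
}
#endif
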
/**
 *	ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about the new vport
 *
 * Allocate and initialize a new vport defined by @ops.  The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv().  vports that are no longer needed should be released with
 * ovs_vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->upcall_portid = parms->upcall_portid;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
	if (!vport->percpu_stats) {
		kfree(vport);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&vport->stats_lock);

	return vport;
}

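/* Illustrative only (not compiled): a sketch of how a vport implementation's
 * create() callback might use ovs_vport_alloc() and vport_priv().  The
 * struct, function, and ovs_example_vport_ops are hypothetical; real
 * examples live in vport-netdev.c and vport-internal_dev.c.
 */
#if 0
struct example_vport_priv {
	int example_state;
};

static struct vport *example_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct example_vport_priv *priv;

	vport = ovs_vport_alloc(sizeof(struct example_vport_priv),
				&ovs_example_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	priv = vport_priv(vport);
	priv->example_state = 0;

	return vport;
}
#endif
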
/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	free_percpu(vport->percpu_stats);
	kfree(vport);
}

/**
 *	ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  RTNL lock must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport *vport;
	int err = 0;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
		if (vport_ops_list[i]->type == parms->type) {
			struct hlist_head *bucket;

			vport = vport_ops_list[i]->create(parms);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
					     vport->ops->get_name(vport));
			hlist_add_head_rcu(&vport->hash_node, bucket);
			return vport;
		}
	}

	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}

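/* Illustrative only (not compiled): a hypothetical kernel caller building
 * struct vport_parms and creating a port under RTNL.  Field values here
 * are examples; see vport.h for the authoritative parms definition.
 */
#if 0
static struct vport *example_add_internal_port(struct datapath *dp,
					       u16 port_no)
{
	struct vport_parms parms = {
		.name		= "example0",	/* hypothetical name */
		.type		= OVS_VPORT_TYPE_INTERNAL,
		.options	= NULL,
		.dp		= dp,
		.port_no	= port_no,
		.upcall_portid	= 0,
	};
	struct vport *vport;

	rtnl_lock();
	vport = ovs_vport_add(&parms);
	rtnl_unlock();

	return vport;	/* ERR_PTR() on failure */
}
#endif
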
/**
 *	ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type).  RTNL lock must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	ASSERT_RTNL();

	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  RTNL lock must be
 * held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_RTNL();

	hlist_del_rcu(&vport->hash_node);

	vport->ops->destroy(vport);
}

/**
 *	ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with RTNL lock or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));

	/* We potentially have two sources of stats that need to be combined:
	 * those we have collected (split into err_stats and percpu_stats)
	 * and device error stats from netdev->get_stats() (for errors that
	 * happen downstream and therefore aren't reported through our
	 * vport_record_error() function).
	 * Stats from the first source are reported by ovs
	 * (OVS_VPORT_ATTR_STATS); netdev stats can be read directly via
	 * netlink or ioctl.
	 */

	spin_lock_bh(&vport->stats_lock);

	stats->rx_errors	= vport->err_stats.rx_errors;
	stats->tx_errors	= vport->err_stats.tx_errors;
	stats->tx_dropped	= vport->err_stats.tx_dropped;
	stats->rx_dropped	= vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);

	for_each_possible_cpu(i) {
		const struct vport_percpu_stats *percpu_stats;
		struct vport_percpu_stats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->rx_bytes		+= local_stats.rx_bytes;
		stats->rx_packets	+= local_stats.rx_packets;
		stats->tx_bytes		+= local_stats.tx_bytes;
		stats->tx_packets	+= local_stats.tx_packets;
	}
}

/**
 *	ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred.  If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with RTNL lock or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	if (vport->ops->get_options) {
		int err = vport->ops->get_options(vport, skb);
		if (err) {
			nla_nest_cancel(skb, nla);
			return err;
		}
	}

	nla_nest_end(skb, nla);
	return 0;
}

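/* Illustrative only (not compiled): a sketch of a vport_ops->get_options()
 * callback.  It appends nested attributes to @skb and returns 0, or a
 * negative errno (typically -EMSGSIZE) so the caller above can cancel the
 * nest.  The attribute type and example_vport_priv are hypothetical.
 */
#if 0
static int example_get_options(const struct vport *vport, struct sk_buff *skb)
{
	const struct example_vport_priv *priv = vport_priv(vport);

	if (nla_put_u32(skb, 1 /* hypothetical attr type */,
			priv->example_state))
		return -EMSGSIZE;

	return 0;
}
#endif
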
/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{
	struct vport_percpu_stats *stats;

	stats = this_cpu_ptr(vport->percpu_stats);
	u64_stats_update_begin(&stats->sync);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->sync);

	ovs_dp_process_received_packet(vport, skb);
}

/**
 *	ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent.  Either RTNL
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent)) {
		struct vport_percpu_stats *stats;

		stats = this_cpu_ptr(vport->percpu_stats);

		u64_stats_update_begin(&stats->sync);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->sync);
	}
	return sent;
}

/**
 *	ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
 */
void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
	spin_lock(&vport->stats_lock);

	switch (err_type) {
	case VPORT_E_RX_DROPPED:
		vport->err_stats.rx_dropped++;
		break;

	case VPORT_E_RX_ERROR:
		vport->err_stats.rx_errors++;
		break;

	case VPORT_E_TX_DROPPED:
		vport->err_stats.tx_dropped++;
		break;

	case VPORT_E_TX_ERROR:
		vport->err_stats.tx_errors++;
		break;
	}

	spin_unlock(&vport->stats_lock);
}
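
/* Illustrative only (not compiled): an error path in a hypothetical
 * vport_ops->send() implementation.  On failure the skb is freed, the
 * error is recorded against the vport via ovs_vport_record_error(), and
 * 0 bytes are reported as sent.
 */
#if 0
static int example_send(struct vport *vport, struct sk_buff *skb)
{
	int err = -ENODEV;	/* pretend the underlying device is gone */

	if (err) {
		kfree_skb(skb);
		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
		return 0;
	}

	return skb->len;
}
#endif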
403