xref: /openbmc/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c (revision 9977a8c3497a8f7f7f951994f298a8e4d961234f)
1 /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * RMNET configuration engine
13  *
14  */
15 
16 #include <net/sock.h>
17 #include <linux/module.h>
18 #include <linux/netlink.h>
19 #include <linux/netdevice.h>
20 #include "rmnet_config.h"
21 #include "rmnet_handlers.h"
22 #include "rmnet_vnd.h"
23 #include "rmnet_private.h"
24 
25 /* Locking scheme -
26  * The shared resource which needs to be protected is realdev->rx_handler_data.
27  * For the writer path, this is using rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired. There is also an
30  * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
31  * dereference here, we will need to use rtnl_dereference(). Dev list writing
32  * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
33  * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
34  * path. We only need rcu_read_lock() for these scenarios. In these cases,
35  * the rcu_read_lock() is held in __dev_queue_xmit() and
36  * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
37  * to get the relevant information. For dev list reading, we again acquire
38  * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
39  * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in
 * the same context.
42  */
43 
44 /* Local Definitions and Declarations */
45 
/* Context passed through netdev_walk_all_lower_dev_rcu() while force-
 * unassociating a real dev (see rmnet_dev_walk_unreg()).
 */
struct rmnet_walk_data {
	struct net_device *real_dev;	/* real dev being torn down */
	struct list_head *head;		/* kill list for unregister_netdevice_many() */
	struct rmnet_port *port;	/* rx_handler_data of real_dev */
};
51 
52 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
53 {
54 	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
55 }
56 
/* Returns the rmnet_port hung off @real_dev's rx_handler_data.
 * Needs rtnl lock (writer-side dereference).
 */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}
63 
64 static int rmnet_unregister_real_device(struct net_device *real_dev,
65 					struct rmnet_port *port)
66 {
67 	if (port->nr_rmnet_devs)
68 		return -EINVAL;
69 
70 	kfree(port);
71 
72 	netdev_rx_handler_unregister(real_dev);
73 
74 	/* release reference on real_dev */
75 	dev_put(real_dev);
76 
77 	netdev_dbg(real_dev, "Removed from rmnet\n");
78 	return 0;
79 }
80 
81 static int rmnet_register_real_device(struct net_device *real_dev)
82 {
83 	struct rmnet_port *port;
84 	int rc, entry;
85 
86 	ASSERT_RTNL();
87 
88 	if (rmnet_is_real_dev_registered(real_dev))
89 		return 0;
90 
91 	port = kzalloc(sizeof(*port), GFP_ATOMIC);
92 	if (!port)
93 		return -ENOMEM;
94 
95 	port->dev = real_dev;
96 	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
97 	if (rc) {
98 		kfree(port);
99 		return -EBUSY;
100 	}
101 
102 	/* hold on to real dev for MAP data */
103 	dev_hold(real_dev);
104 
105 	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
106 		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
107 
108 	netdev_dbg(real_dev, "registered with rmnet\n");
109 	return 0;
110 }
111 
/* Tears down bridge state involving @dev. No-op unless @port is in
 * RMNET_EPMODE_BRIDGE mode. Two cases, distinguished by nr_rmnet_devs:
 * - @dev is the bridge slave (no rmnet devs attached to it): unlink it
 *   from its rmnet master and flip the real dev's port back to VND mode.
 * - @dev is the real dev: unlink the slave from its rmnet master and
 *   fully unregister the slave from rmnet.
 * Called under rtnl_lock() via the dellink / force-unassociate paths.
 */
static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct net_device *rmnet_dev, *bridge_dev;
	struct rmnet_port *bridge_port;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		/* @dev is the slave; its master is the rmnet vnd */
		rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
		netdev_upper_dev_unlink(dev, rmnet_dev);

		bridge_dev = port->bridge_ep;

		/* reset the real dev's port back to plain VND operation */
		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		/* @dev is the real dev; bridge_ep points at the slave */
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
		netdev_upper_dev_unlink(bridge_dev, rmnet_dev);

		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}
141 
142 static int rmnet_newlink(struct net *src_net, struct net_device *dev,
143 			 struct nlattr *tb[], struct nlattr *data[],
144 			 struct netlink_ext_ack *extack)
145 {
146 	u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
147 	struct net_device *real_dev;
148 	int mode = RMNET_EPMODE_VND;
149 	struct rmnet_endpoint *ep;
150 	struct rmnet_port *port;
151 	int err = 0;
152 	u16 mux_id;
153 
154 	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
155 	if (!real_dev || !dev)
156 		return -ENODEV;
157 
158 	if (!data[IFLA_VLAN_ID])
159 		return -EINVAL;
160 
161 	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
162 	if (!ep)
163 		return -ENOMEM;
164 
165 	mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
166 
167 	err = rmnet_register_real_device(real_dev);
168 	if (err)
169 		goto err0;
170 
171 	port = rmnet_get_port_rtnl(real_dev);
172 	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
173 	if (err)
174 		goto err1;
175 
176 	err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
177 	if (err)
178 		goto err2;
179 
180 	port->rmnet_mode = mode;
181 
182 	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
183 
184 	if (data[IFLA_VLAN_FLAGS]) {
185 		struct ifla_vlan_flags *flags;
186 
187 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
188 		data_format = flags->flags & flags->mask;
189 	}
190 
191 	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
192 	port->data_format = data_format;
193 
194 	return 0;
195 
196 err2:
197 	rmnet_vnd_dellink(mux_id, port, ep);
198 err1:
199 	rmnet_unregister_real_device(real_dev, port);
200 err0:
201 	kfree(ep);
202 	return err;
203 }
204 
/* rtnl dellink handler: unlinks @dev from its real dev, removes its
 * endpoint from the mux table, and queues @dev on @head for batched
 * unregistration. Runs with rtnl_lock() held.
 */
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	/* netdev_master_upper_dev_get_rcu() requires rcu_read_lock() even
	 * though rtnl is held
	 */
	rcu_read_lock();
	real_dev = netdev_master_upper_dev_get_rcu(dev);
	rcu_read_unlock();

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);
	netdev_upper_dev_unlink(dev, real_dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}
	/* no-op (returns -EINVAL) while other rmnet devs remain attached */
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}
235 
236 static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
237 {
238 	struct rmnet_walk_data *d = data;
239 	struct rmnet_endpoint *ep;
240 	u8 mux_id;
241 
242 	mux_id = rmnet_vnd_get_mux(rmnet_dev);
243 	ep = rmnet_get_endpoint(d->port, mux_id);
244 	if (ep) {
245 		hlist_del_init_rcu(&ep->hlnode);
246 		rmnet_vnd_dellink(mux_id, d->port, ep);
247 		kfree(ep);
248 	}
249 	netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
250 	unregister_netdevice_queue(rmnet_dev, d->head);
251 
252 	return 0;
253 }
254 
/* Detaches and unregisters every rmnet device stacked on @dev, then
 * releases rmnet's claim on @dev itself. Runs from the NETDEV_UNREGISTER
 * notifier with rtnl_lock() held by the caller.
 */
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct rmnet_walk_data d;
	struct rmnet_port *port;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	d.real_dev = real_dev;
	d.head = &list;

	port = rmnet_get_port_rtnl(dev);
	d.port = port;

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	/* queue every lower rmnet dev on &list for batched unregistration */
	netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
	rcu_read_unlock();
	/* free outside the RCU read section but still under rtnl, so we
	 * don't drop rtnl_lock() between unlink and free
	 */
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}
282 
283 static int rmnet_config_notify_cb(struct notifier_block *nb,
284 				  unsigned long event, void *data)
285 {
286 	struct net_device *dev = netdev_notifier_info_to_dev(data);
287 
288 	if (!dev)
289 		return NOTIFY_DONE;
290 
291 	switch (event) {
292 	case NETDEV_UNREGISTER:
293 		netdev_dbg(dev, "Kernel unregister\n");
294 		rmnet_force_unassociate_device(dev);
295 		break;
296 
297 	default:
298 		break;
299 	}
300 
301 	return NOTIFY_DONE;
302 }
303 
/* Reacts to real-dev unregistration so rmnet devices never outlive
 * their lower device.
 */
static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};
307 
308 static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
309 			       struct netlink_ext_ack *extack)
310 {
311 	u16 mux_id;
312 
313 	if (!data || !data[IFLA_VLAN_ID])
314 		return -EINVAL;
315 
316 	mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
317 	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
318 		return -ERANGE;
319 
320 	return 0;
321 }
322 
323 static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
324 			    struct nlattr *data[],
325 			    struct netlink_ext_ack *extack)
326 {
327 	struct rmnet_priv *priv = netdev_priv(dev);
328 	struct net_device *real_dev;
329 	struct rmnet_endpoint *ep;
330 	struct rmnet_port *port;
331 	u16 mux_id;
332 
333 	real_dev = __dev_get_by_index(dev_net(dev),
334 				      nla_get_u32(tb[IFLA_LINK]));
335 
336 	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
337 		return -ENODEV;
338 
339 	port = rmnet_get_port_rtnl(real_dev);
340 
341 	if (data[IFLA_VLAN_ID]) {
342 		mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
343 		ep = rmnet_get_endpoint(port, priv->mux_id);
344 
345 		hlist_del_init_rcu(&ep->hlnode);
346 		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
347 
348 		ep->mux_id = mux_id;
349 		priv->mux_id = mux_id;
350 	}
351 
352 	if (data[IFLA_VLAN_FLAGS]) {
353 		struct ifla_vlan_flags *flags;
354 
355 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
356 		port->data_format = flags->flags & flags->mask;
357 	}
358 
359 	return 0;
360 }
361 
362 static size_t rmnet_get_size(const struct net_device *dev)
363 {
364 	return nla_total_size(2) /* IFLA_VLAN_ID */ +
365 	       nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
366 }
367 
/* rtnl link ops for "rmnet". Reuses the VLAN attribute space:
 * IFLA_VLAN_ID carries the mux id, IFLA_VLAN_FLAGS the data format.
 */
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_VLAN_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink     = rmnet_changelink,
};
379 
380 /* Needs either rcu_read_lock() or rtnl lock */
381 struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
382 {
383 	if (rmnet_is_real_dev_registered(real_dev))
384 		return rcu_dereference_rtnl(real_dev->rx_handler_data);
385 	else
386 		return NULL;
387 }
388 
389 struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
390 {
391 	struct rmnet_endpoint *ep;
392 
393 	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
394 		if (ep->mux_id == mux_id)
395 			return ep;
396 	}
397 
398 	return NULL;
399 }
400 
401 int rmnet_add_bridge(struct net_device *rmnet_dev,
402 		     struct net_device *slave_dev,
403 		     struct netlink_ext_ack *extack)
404 {
405 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
406 	struct net_device *real_dev = priv->real_dev;
407 	struct rmnet_port *port, *slave_port;
408 	int err;
409 
410 	port = rmnet_get_port(real_dev);
411 
412 	/* If there is more than one rmnet dev attached, its probably being
413 	 * used for muxing. Skip the briding in that case
414 	 */
415 	if (port->nr_rmnet_devs > 1)
416 		return -EINVAL;
417 
418 	if (rmnet_is_real_dev_registered(slave_dev))
419 		return -EBUSY;
420 
421 	err = rmnet_register_real_device(slave_dev);
422 	if (err)
423 		return -EBUSY;
424 
425 	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
426 					   extack);
427 	if (err)
428 		return -EINVAL;
429 
430 	slave_port = rmnet_get_port(slave_dev);
431 	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
432 	slave_port->bridge_ep = real_dev;
433 
434 	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
435 	port->bridge_ep = slave_dev;
436 
437 	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
438 	return 0;
439 }
440 
441 int rmnet_del_bridge(struct net_device *rmnet_dev,
442 		     struct net_device *slave_dev)
443 {
444 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
445 	struct net_device *real_dev = priv->real_dev;
446 	struct rmnet_port *port, *slave_port;
447 
448 	port = rmnet_get_port(real_dev);
449 	port->rmnet_mode = RMNET_EPMODE_VND;
450 	port->bridge_ep = NULL;
451 
452 	netdev_upper_dev_unlink(slave_dev, rmnet_dev);
453 	slave_port = rmnet_get_port(slave_dev);
454 	rmnet_unregister_real_device(slave_dev, slave_port);
455 
456 	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
457 	return 0;
458 }
459 
460 /* Startup/Shutdown */
461 
462 static int __init rmnet_init(void)
463 {
464 	int rc;
465 
466 	rc = register_netdevice_notifier(&rmnet_dev_notifier);
467 	if (rc != 0)
468 		return rc;
469 
470 	rc = rtnl_link_register(&rmnet_link_ops);
471 	if (rc != 0) {
472 		unregister_netdevice_notifier(&rmnet_dev_notifier);
473 		return rc;
474 	}
475 	return rc;
476 }
477 
478 static void __exit rmnet_exit(void)
479 {
480 	unregister_netdevice_notifier(&rmnet_dev_notifier);
481 	rtnl_link_unregister(&rmnet_link_ops);
482 }
483 
/* module entry/exit hookup */
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");
487