/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 *
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"

/* Locking scheme -
 * The shared resource which needs to be protected is real_dev->rx_handler_data.
 * For the writer path, this is protected by rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired, and an ASSERT_RTNL()
 * verifies that rtnl is indeed held. Dereferences on the writer side therefore
 * use rtnl_dereference(). Device list writes must also happen with rtnl_lock()
 * held, as required by netdev_master_upper_dev_link().
 * For the reader path, real_dev->rx_handler_data is dereferenced in the
 * TX / RX paths, where rcu_read_lock() is sufficient. It is already held in
 * __dev_queue_xmit() and netif_receive_skb_internal(), so readers use
 * rcu_dereference_rtnl() to get the relevant information. For device list
 * reads, rcu_read_lock() is again acquired in rmnet_dellink() around
 * netdev_master_upper_dev_get_rcu().
 * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device(), so that we don't drop rtnl_lock() and the
 * devices are freed in the same context.
 */

/* Local Definitions and Declarations */

static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
};

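/* A real device counts as registered with rmnet once our rx handler has been
 * installed on it; the handler data then points at its rmnet_port.
 */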
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}

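/* Detaches rmnet from a real device: refuses while rmnet devices are still
 * attached, otherwise removes the rx handler, frees the port and drops the
 * reference taken in rmnet_register_real_device().
 */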
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	/* Remove the rx handler before freeing the port it points at, so a
	 * concurrent receive path cannot see freed handler data.
	 */
	netdev_rx_handler_unregister(real_dev);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}

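/* Attaches rmnet to a real device: installs the rmnet rx handler with a newly
 * allocated rmnet_port as handler data, takes a reference on the device and
 * initialises the muxed endpoint table. A no-op if already registered.
 */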
static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc, entry;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}

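/* Tears down bridge state. When called on the bridge slave (no rmnet devices
 * attached), the master's bridge endpoint is cleared; when called on the
 * master, the slave real device is unregistered from rmnet entirely.
 */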
static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct rmnet_port *bridge_port;
	struct net_device *bridge_dev;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}

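/* rtnl newlink handler: attaches the new rmnet device to the real device named
 * by IFLA_LINK, creates the endpoint for the requested mux ID and records the
 * requested data format flags on the port.
 */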
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	int err = 0;
	u16 mux_id;

	/* A lower device is mandatory; without this check nla_get_u32() below
	 * would dereference a NULL attribute.
	 */
	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return -ENOMEM;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
	if (err)
		goto err1;

	port->rmnet_mode = mode;

	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
	}

	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
	port->data_format = data_format;

	return 0;

err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	kfree(ep);
	return err;
}

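/* rtnl dellink handler: removes the endpoint for this rmnet device, tears
 * down any bridge, unregisters the real device if it was the last user, and
 * queues the rmnet device itself for unregistration.
 */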
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}

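/* Called when the real device itself is going away: queue every attached
 * rmnet device for removal, drop any bridge, then release the real device.
 * Runs under rtnl via the netdevice notifier.
 */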
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}

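/* Netdevice notifier: when a real device is unregistered underneath us, force
 * all rmnet state attached to it to be cleaned up.
 */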
static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};

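/* Validate netlink attributes before newlink/changelink: a mux ID is
 * mandatory and must fit within the logical endpoint table.
 */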
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	u16 mux_id;

	if (!data || !data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
		return -ERANGE;

	return 0;
}

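/* rtnl changelink handler: moves the endpoint to a new mux ID and/or updates
 * the data format flags on the real device's port.
 */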
static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;

	if (!dev)
		return -ENODEV;

	/* Guard against a request without IFLA_LINK, which would otherwise
	 * make nla_get_u32() dereference a NULL attribute.
	 */
	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(dev_net(dev),
				      nla_get_u32(tb[IFLA_LINK]));

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;

	port = rmnet_get_port_rtnl(real_dev);

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		ep = rmnet_get_endpoint(port, priv->mux_id);
		if (!ep)
			return -ENODEV;

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

		ep->mux_id = mux_id;
		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		port->data_format = flags->flags & flags->mask;
	}

	return 0;
}

static size_t rmnet_get_size(const struct net_device *dev)
{
	return
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags));
}

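/* Fill the IFLA_RMNET_* attributes for a netlink dump: the mux ID from the
 * rmnet device and the current data format flags from the real device's port.
 */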
static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port;

	real_dev = priv->real_dev;

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
		goto nla_put_failure;

	if (rmnet_is_real_dev_registered(real_dev)) {
		port = rmnet_get_port_rtnl(real_dev);
		f.flags = port->data_format;
	} else {
		f.flags = 0;
	}

	f.mask  = ~0;

	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_RMNET_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink     = rmnet_changelink,
	.policy		= rmnet_policy,
	.fill_info	= rmnet_fill_info,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}

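/* Look up the endpoint for a mux ID in the port's muxed endpoint table.
 * Callers are expected to hold rcu_read_lock() or rtnl.
 */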
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}

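/* Set up bridge mode between the rmnet device's real device (master) and a
 * second real device (slave), so traffic can be forwarded between the two.
 */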
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;
	int err;

	port = rmnet_get_port(real_dev);

	/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case.
	 */
	if (port->nr_rmnet_devs > 1)
		return -EINVAL;

	if (rmnet_is_real_dev_registered(slave_dev))
		return -EBUSY;

	err = rmnet_register_real_device(slave_dev);
	if (err)
		return -EBUSY;

	slave_port = rmnet_get_port(slave_dev);
	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	slave_port->bridge_ep = real_dev;

	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	port->bridge_ep = slave_dev;

	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
	return 0;
}

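/* Undo rmnet_add_bridge(): return the master port to VND mode and unregister
 * the slave real device from rmnet.
 */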
int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;

	port = rmnet_get_port(real_dev);
	port->rmnet_mode = RMNET_EPMODE_VND;
	port->bridge_ep = NULL;

	slave_port = rmnet_get_port(slave_dev);
	rmnet_unregister_real_device(slave_dev, slave_port);

	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
	return 0;
}

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
}

module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");