/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 *
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"

/* Locking scheme -
 * The shared resource which needs to be protected is real_dev->rx_handler_data.
 * The writer path is protected by rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(), all
 * of which are already called with rtnl_lock() held. An ASSERT_RTNL() is used
 * to ensure that rtnl is indeed held, and dereferences on this path use
 * rtnl_dereference(). Writing the dev list also needs to happen with
 * rtnl_lock() held because of netdev_master_upper_dev_link().
 * On the reader path, real_dev->rx_handler_data is accessed in the TX / RX
 * path, where only rcu_read_lock() is needed. In these cases the
 * rcu_read_lock() is already held in __dev_queue_xmit() and
 * netif_receive_skb_internal(), so readers use rcu_dereference_rtnl() to get
 * the relevant information. For dev list reading, rcu_read_lock() is acquired
 * in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 * unregister_netdevice_many() is used to free all rmnet devices in
 * rmnet_force_unassociate_device() so that rtnl_lock() is not dropped and the
 * free happens in the same context.
 */

/* Local Definitions and Declarations */

static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
};

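/* A real (physical) device is considered registered with rmnet once
 * rmnet_rx_handler has been installed as its rx_handler.
 */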
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}

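/* Detach rmnet from a physical device once no rmnet devices are left on it:
 * remove the rx handler, free the port and drop the reference taken in
 * rmnet_register_real_device().
 */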
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	/* Remove the rx handler before freeing port: the handler's
	 * rx_handler_data points at port, so it must no longer be reachable
	 * from the RX path once the memory is gone.
	 */
	netdev_rx_handler_unregister(real_dev);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}

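/* Attach rmnet to a physical device: allocate the per-device rmnet_port,
 * install rmnet_rx_handler as the device's rx_handler and take a reference
 * on real_dev. Returns 0 if the device is already registered.
 */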
static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc, entry;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}

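/* Undo a bridge association. If this port has no rmnet devices attached it
 * is the bridge slave, so the master port is reset back to VND mode;
 * otherwise it is the master, so the slave real device is unregistered.
 */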
static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct rmnet_port *bridge_port;
	struct net_device *bridge_dev;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}

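/* Create a new rmnet device on top of a real device. Typically driven from
 * userspace via iproute2, e.g. (device names are only examples):
 *   ip link add link <real dev> name rmnet0 type rmnet mux_id 1
 */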
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	int err = 0;
	u16 mux_id;

	/* rmnet cannot exist without a real device to attach to */
	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return -ENOMEM;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
	if (err)
		goto err1;

	port->rmnet_mode = mode;

	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
	}

	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
	port->data_format = data_format;

	return 0;

err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	kfree(ep);
	return err;
}

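/* Remove an rmnet device: unhash its endpoint, undo any bridge association
 * and release the real device if this was the last rmnet device attached
 * to it.
 */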
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}

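/* Called when the real device itself is going away: tear down the bridge
 * (if any), delete every rmnet device still attached and detach rmnet from
 * the real device.
 */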
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}

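/* Netdevice notifier: if a real device is unregistered underneath rmnet,
 * force-detach all rmnet devices stacked on top of it.
 */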
static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};

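/* Validate the rmnet-specific attributes: a mux_id is mandatory and must be
 * below RMNET_MAX_LOGICAL_EP.
 */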
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	u16 mux_id;

	if (!data || !data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
		return -ERANGE;

	return 0;
}

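/* Change the mux_id and/or data format flags of an existing rmnet device.
 * The endpoint is looked up under its current mux_id and rehashed into the
 * bucket for the new one.
 */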
static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;

	/* The real device was fixed at newlink time; use it directly instead
	 * of requiring IFLA_LINK to be resent on every changelink.
	 */
	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;

	port = rmnet_get_port_rtnl(real_dev);

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		ep = rmnet_get_endpoint(port, priv->mux_id);
		if (!ep)
			return -ENODEV;

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

		ep->mux_id = mux_id;
		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		port->data_format = flags->flags & flags->mask;
	}

	return 0;
}

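/* Report the rmnet-specific attributes (mux_id and data format flags) back
 * to userspace.
 */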
static size_t rmnet_get_size(const struct net_device *dev)
{
	return
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags));
}

static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port;

	real_dev = priv->real_dev;

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
		goto nla_put_failure;

	if (rmnet_is_real_dev_registered(real_dev)) {
		port = rmnet_get_port_rtnl(real_dev);
		f.flags = port->data_format;
	} else {
		f.flags = 0;
	}

	f.mask  = ~0;

	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= IFLA_RMNET_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink     = rmnet_changelink,
	.policy		= rmnet_policy,
	.fill_info	= rmnet_fill_info,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}

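/* Find the logical endpoint registered for a given mux_id, if any. The
 * muxed_ep hash is indexed directly by mux_id.
 */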
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}

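/* Pair the rmnet device's real device with a second physical device in
 * bridge mode. Both ports are marked RMNET_EPMODE_BRIDGE and point at each
 * other through bridge_ep.
 */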
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;
	int err;

	port = rmnet_get_port(real_dev);

	/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case.
	 */
	if (port->nr_rmnet_devs > 1)
		return -EINVAL;

	if (rmnet_is_real_dev_registered(slave_dev))
		return -EBUSY;

	err = rmnet_register_real_device(slave_dev);
	if (err)
		return -EBUSY;

	slave_port = rmnet_get_port(slave_dev);
	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	slave_port->bridge_ep = real_dev;

	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	port->bridge_ep = slave_dev;

	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
	return 0;
}

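/* Undo rmnet_add_bridge(): return the master port to VND mode and release
 * the slave device.
 */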
int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;

	port = rmnet_get_port(real_dev);
	port->rmnet_mode = RMNET_EPMODE_VND;
	port->bridge_ep = NULL;

	slave_port = rmnet_get_port(slave_dev);
	rmnet_unregister_real_device(slave_dev, slave_port);

	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
	return 0;
}

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
}

module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");