1 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * RMNET configuration engine
13  *
14  */
15 
16 #include <net/sock.h>
17 #include <linux/module.h>
18 #include <linux/netlink.h>
19 #include <linux/netdevice.h>
20 #include "rmnet_config.h"
21 #include "rmnet_handlers.h"
22 #include "rmnet_vnd.h"
23 #include "rmnet_private.h"
24 
25 /* Locking scheme -
26  * The shared resource which needs to be protected is realdev->rx_handler_data.
27  * For the writer path, this is using rtnl_lock(). The writer paths are
28  * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
29  * paths are already called with rtnl_lock() acquired in. There is also an
30  * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
31  * dereference here, we will need to use rtnl_dereference(). Dev list writing
32  * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
33  * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
34  * path. We only need rcu_read_lock() for these scenarios. In these cases,
35  * the rcu_read_lock() is held in __dev_queue_xmit() and
36  * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
37  * to get the relevant information. For dev list reading, we again acquire
38  * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
39  * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
 * in the same context.
42  */
43 
44 /* Local Definitions and Declarations */
45 
/* Netlink attribute policy for IFLA_RMNET_* attributes; validated by the
 * rtnetlink core before newlink/changelink callbacks run.
 */
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
};
50 
51 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
52 {
53 	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
54 }
55 
56 /* Needs rtnl lock */
57 static struct rmnet_port*
58 rmnet_get_port_rtnl(const struct net_device *real_dev)
59 {
60 	return rtnl_dereference(real_dev->rx_handler_data);
61 }
62 
63 static int rmnet_unregister_real_device(struct net_device *real_dev,
64 					struct rmnet_port *port)
65 {
66 	if (port->nr_rmnet_devs)
67 		return -EINVAL;
68 
69 	kfree(port);
70 
71 	netdev_rx_handler_unregister(real_dev);
72 
73 	/* release reference on real_dev */
74 	dev_put(real_dev);
75 
76 	netdev_dbg(real_dev, "Removed from rmnet\n");
77 	return 0;
78 }
79 
80 static int rmnet_register_real_device(struct net_device *real_dev)
81 {
82 	struct rmnet_port *port;
83 	int rc, entry;
84 
85 	ASSERT_RTNL();
86 
87 	if (rmnet_is_real_dev_registered(real_dev))
88 		return 0;
89 
90 	port = kzalloc(sizeof(*port), GFP_ATOMIC);
91 	if (!port)
92 		return -ENOMEM;
93 
94 	port->dev = real_dev;
95 	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
96 	if (rc) {
97 		kfree(port);
98 		return -EBUSY;
99 	}
100 
101 	/* hold on to real dev for MAP data */
102 	dev_hold(real_dev);
103 
104 	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
105 		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
106 
107 	netdev_dbg(real_dev, "registered with rmnet\n");
108 	return 0;
109 }
110 
111 static void rmnet_unregister_bridge(struct net_device *dev,
112 				    struct rmnet_port *port)
113 {
114 	struct rmnet_port *bridge_port;
115 	struct net_device *bridge_dev;
116 
117 	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
118 		return;
119 
120 	/* bridge slave handling */
121 	if (!port->nr_rmnet_devs) {
122 		bridge_dev = port->bridge_ep;
123 
124 		bridge_port = rmnet_get_port_rtnl(bridge_dev);
125 		bridge_port->bridge_ep = NULL;
126 		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
127 	} else {
128 		bridge_dev = port->bridge_ep;
129 
130 		bridge_port = rmnet_get_port_rtnl(bridge_dev);
131 		rmnet_unregister_real_device(bridge_dev, bridge_port);
132 	}
133 }
134 
135 static int rmnet_newlink(struct net *src_net, struct net_device *dev,
136 			 struct nlattr *tb[], struct nlattr *data[],
137 			 struct netlink_ext_ack *extack)
138 {
139 	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
140 	struct net_device *real_dev;
141 	int mode = RMNET_EPMODE_VND;
142 	struct rmnet_endpoint *ep;
143 	struct rmnet_port *port;
144 	int err = 0;
145 	u16 mux_id;
146 
147 	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
148 	if (!real_dev || !dev)
149 		return -ENODEV;
150 
151 	if (!data[IFLA_RMNET_MUX_ID])
152 		return -EINVAL;
153 
154 	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
155 	if (!ep)
156 		return -ENOMEM;
157 
158 	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
159 
160 	err = rmnet_register_real_device(real_dev);
161 	if (err)
162 		goto err0;
163 
164 	port = rmnet_get_port_rtnl(real_dev);
165 	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
166 	if (err)
167 		goto err1;
168 
169 	port->rmnet_mode = mode;
170 
171 	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
172 
173 	if (data[IFLA_RMNET_FLAGS]) {
174 		struct ifla_rmnet_flags *flags;
175 
176 		flags = nla_data(data[IFLA_RMNET_FLAGS]);
177 		data_format = flags->flags & flags->mask;
178 	}
179 
180 	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
181 	port->data_format = data_format;
182 
183 	return 0;
184 
185 err1:
186 	rmnet_unregister_real_device(real_dev, port);
187 err0:
188 	kfree(ep);
189 	return err;
190 }
191 
/* rtnl_link_ops .dellink — tears down one rmnet virtual device.
 * Called with rtnl held. Ordering matters: the endpoint is removed
 * from the RCU-protected mux table before its state is released.
 */
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	/* Nothing to do if the lower device already left rmnet */
	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		kfree(ep);
	}
	/* Frees port only once no rmnet devs remain (else returns -EINVAL) */
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}
220 
/* Detaches everything rmnet has attached to @dev, invoked when the real
 * device is being unregistered by the kernel (NETDEV_UNREGISTER).
 * All muxed VNDs are queued on a local list and freed together via
 * unregister_netdevice_many() so the whole teardown happens in the same
 * rtnl-held context (see the locking note at the top of this file).
 */
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	/* _safe variant: entries are unhooked and freed while iterating */
	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}
253 
254 static int rmnet_config_notify_cb(struct notifier_block *nb,
255 				  unsigned long event, void *data)
256 {
257 	struct net_device *dev = netdev_notifier_info_to_dev(data);
258 
259 	if (!dev)
260 		return NOTIFY_DONE;
261 
262 	switch (event) {
263 	case NETDEV_UNREGISTER:
264 		netdev_dbg(dev, "Kernel unregister\n");
265 		rmnet_force_unassociate_device(dev);
266 		break;
267 
268 	default:
269 		break;
270 	}
271 
272 	return NOTIFY_DONE;
273 }
274 
/* Netdev event notifier; dispatches to rmnet_config_notify_cb() */
static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};
278 
279 static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
280 			       struct netlink_ext_ack *extack)
281 {
282 	u16 mux_id;
283 
284 	if (!data || !data[IFLA_RMNET_MUX_ID])
285 		return -EINVAL;
286 
287 	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
288 	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
289 		return -ERANGE;
290 
291 	return 0;
292 }
293 
294 static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
295 			    struct nlattr *data[],
296 			    struct netlink_ext_ack *extack)
297 {
298 	struct rmnet_priv *priv = netdev_priv(dev);
299 	struct net_device *real_dev;
300 	struct rmnet_endpoint *ep;
301 	struct rmnet_port *port;
302 	u16 mux_id;
303 
304 	real_dev = __dev_get_by_index(dev_net(dev),
305 				      nla_get_u32(tb[IFLA_LINK]));
306 
307 	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
308 		return -ENODEV;
309 
310 	port = rmnet_get_port_rtnl(real_dev);
311 
312 	if (data[IFLA_RMNET_MUX_ID]) {
313 		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
314 		ep = rmnet_get_endpoint(port, priv->mux_id);
315 		if (!ep)
316 			return -ENODEV;
317 
318 		hlist_del_init_rcu(&ep->hlnode);
319 		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
320 
321 		ep->mux_id = mux_id;
322 		priv->mux_id = mux_id;
323 	}
324 
325 	if (data[IFLA_RMNET_FLAGS]) {
326 		struct ifla_rmnet_flags *flags;
327 
328 		flags = nla_data(data[IFLA_RMNET_FLAGS]);
329 		port->data_format = flags->flags & flags->mask;
330 	}
331 
332 	return 0;
333 }
334 
335 static size_t rmnet_get_size(const struct net_device *dev)
336 {
337 	return
338 		/* IFLA_RMNET_MUX_ID */
339 		nla_total_size(2) +
340 		/* IFLA_RMNET_FLAGS */
341 		nla_total_size(sizeof(struct ifla_rmnet_flags));
342 }
343 
344 static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
345 {
346 	struct rmnet_priv *priv = netdev_priv(dev);
347 	struct net_device *real_dev;
348 	struct ifla_rmnet_flags f;
349 	struct rmnet_port *port;
350 
351 	real_dev = priv->real_dev;
352 
353 	if (!rmnet_is_real_dev_registered(real_dev))
354 		return -ENODEV;
355 
356 	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
357 		goto nla_put_failure;
358 
359 	port = rmnet_get_port_rtnl(real_dev);
360 
361 	f.flags = port->data_format;
362 	f.mask  = ~0;
363 
364 	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
365 		goto nla_put_failure;
366 
367 	return 0;
368 
369 nla_put_failure:
370 	return -EMSGSIZE;
371 }
372 
/* rtnetlink glue for "ip link add ... type rmnet" */
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_RMNET_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink     = rmnet_changelink,
	.policy		= rmnet_policy,
	.fill_info	= rmnet_fill_info,
};
386 
387 /* Needs either rcu_read_lock() or rtnl lock */
388 struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
389 {
390 	if (rmnet_is_real_dev_registered(real_dev))
391 		return rcu_dereference_rtnl(real_dev->rx_handler_data);
392 	else
393 		return NULL;
394 }
395 
396 struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
397 {
398 	struct rmnet_endpoint *ep;
399 
400 	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
401 		if (ep->mux_id == mux_id)
402 			return ep;
403 	}
404 
405 	return NULL;
406 }
407 
408 int rmnet_add_bridge(struct net_device *rmnet_dev,
409 		     struct net_device *slave_dev,
410 		     struct netlink_ext_ack *extack)
411 {
412 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
413 	struct net_device *real_dev = priv->real_dev;
414 	struct rmnet_port *port, *slave_port;
415 	int err;
416 
417 	port = rmnet_get_port(real_dev);
418 
419 	/* If there is more than one rmnet dev attached, its probably being
420 	 * used for muxing. Skip the briding in that case
421 	 */
422 	if (port->nr_rmnet_devs > 1)
423 		return -EINVAL;
424 
425 	if (rmnet_is_real_dev_registered(slave_dev))
426 		return -EBUSY;
427 
428 	err = rmnet_register_real_device(slave_dev);
429 	if (err)
430 		return -EBUSY;
431 
432 	slave_port = rmnet_get_port(slave_dev);
433 	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
434 	slave_port->bridge_ep = real_dev;
435 
436 	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
437 	port->bridge_ep = slave_dev;
438 
439 	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
440 	return 0;
441 }
442 
443 int rmnet_del_bridge(struct net_device *rmnet_dev,
444 		     struct net_device *slave_dev)
445 {
446 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
447 	struct net_device *real_dev = priv->real_dev;
448 	struct rmnet_port *port, *slave_port;
449 
450 	port = rmnet_get_port(real_dev);
451 	port->rmnet_mode = RMNET_EPMODE_VND;
452 	port->bridge_ep = NULL;
453 
454 	slave_port = rmnet_get_port(slave_dev);
455 	rmnet_unregister_real_device(slave_dev, slave_port);
456 
457 	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
458 	return 0;
459 }
460 
461 /* Startup/Shutdown */
462 
463 static int __init rmnet_init(void)
464 {
465 	int rc;
466 
467 	rc = register_netdevice_notifier(&rmnet_dev_notifier);
468 	if (rc != 0)
469 		return rc;
470 
471 	rc = rtnl_link_register(&rmnet_link_ops);
472 	if (rc != 0) {
473 		unregister_netdevice_notifier(&rmnet_dev_notifier);
474 		return rc;
475 	}
476 	return rc;
477 }
478 
479 static void __exit rmnet_exit(void)
480 {
481 	unregister_netdevice_notifier(&rmnet_dev_notifier);
482 	rtnl_link_unregister(&rmnet_link_ops);
483 }
484 
485 module_init(rmnet_init)
486 module_exit(rmnet_exit)
487 MODULE_LICENSE("GPL v2");
488