/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 *
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"

/* Locking scheme -
 * The shared resource which needs to be protected is realdev->rx_handler_data.
 * For the writer path, this is using rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired. There is also an
 * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
 * dereference here, we will need to use rtnl_dereference(). Dev list writing
 * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
 * For the reader path, the real_dev->rx_handler_data is dereferenced in the
 * TX / RX path. We only need rcu_read_lock() for these scenarios. In these
 * cases, the rcu_read_lock() is held in __dev_queue_xmit() and
 * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
 * to get the relevant information. For dev list reading, we again acquire
 * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
 * in the same context.
 */

/* Local Definitions and Declarations */

struct rmnet_walk_data {
	struct net_device *real_dev;
	struct list_head *head;
	struct rmnet_port *port;
};

static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}

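/* Returns the endpoint for a device: the VND's own endpoint when @dev is an
 * rmnet device, or the muxed endpoint at @config_id when @dev is a real
 * device registered with rmnet.
 */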
static struct rmnet_endpoint*
rmnet_get_endpoint(struct net_device *dev, int config_id)
{
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;

	if (!rmnet_is_real_dev_registered(dev)) {
		ep = rmnet_vnd_get_endpoint(dev);
	} else {
		port = rmnet_get_port_rtnl(dev);

		ep = &port->muxed_ep[config_id];
	}

	return ep;
}

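/* Detaches a real device from rmnet. Fails with -EINVAL while rmnet devices
 * are still attached; otherwise unregisters the rx handler, frees the port
 * and drops the reference taken in rmnet_register_real_device().
 */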
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	/* unregister the rx handler before freeing its data */
	netdev_rx_handler_unregister(real_dev);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}

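/* Associates a real device with rmnet by allocating a port structure and
 * installing it as the device's rx handler data. A reference on the real
 * device is held until rmnet_unregister_real_device(). Calling this for an
 * already registered device is a no-op.
 */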
static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}

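/* Fills in the endpoint for @mux_id on @dev with the given mode and egress
 * device. The endpoint is zeroed first, so stale configuration from a
 * previous set cannot leak through.
 */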
static void rmnet_set_endpoint_config(struct net_device *dev,
				      u8 mux_id, u8 rmnet_mode,
				      struct net_device *egress_dev)
{
	struct rmnet_endpoint *ep;

	netdev_dbg(dev, "id %d mode %d dev %s\n",
		   mux_id, rmnet_mode, egress_dev->name);

	ep = rmnet_get_endpoint(dev, mux_id);
	/* This config is cleared on every set, so it's OK not to
	 * clear it on a device delete.
	 */
	memset(ep, 0, sizeof(struct rmnet_endpoint));
	ep->rmnet_mode = rmnet_mode;
	ep->egress_dev = egress_dev;
	ep->mux_id = mux_id;
}

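/* Creates a new rmnet device on top of the real device given by IFLA_LINK.
 * The mux ID is carried in IFLA_VLAN_ID, which this driver reuses as its
 * link attribute. Called with rtnl held.
 */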
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
			     RMNET_INGRESS_FORMAT_DEAGGREGATION |
			     RMNET_INGRESS_FORMAT_MAP;
	int egress_format = RMNET_EGRESS_FORMAT_MUXING |
			    RMNET_EGRESS_FORMAT_MAP;
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_port *port;
	int err = 0;
	u16 mux_id;

	/* an underlying real device must be specified via IFLA_LINK */
	if (!tb[IFLA_LINK])
		return -EINVAL;

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_VLAN_ID])
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_VLAN_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev);
	if (err)
		goto err1;

	err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL);
	if (err)
		goto err2;

	netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
		   ingress_format, egress_format);
	port->egress_data_format = egress_format;
	port->ingress_data_format = ingress_format;

	rmnet_set_endpoint_config(real_dev, mux_id, mode, dev);
	rmnet_set_endpoint_config(dev, mux_id, mode, real_dev);
	return 0;

err2:
	rmnet_vnd_dellink(mux_id, port);
err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	return err;
}

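/* Removes a single rmnet device: drops its endpoint from the port, breaks
 * the upper/lower link to the real device and queues the device for
 * unregistration. The real device itself is released only once its last
 * rmnet device is gone.
 */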
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *real_dev;
	struct rmnet_port *port;
	u8 mux_id;

	rcu_read_lock();
	real_dev = netdev_master_upper_dev_get_rcu(dev);
	rcu_read_unlock();

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);
	rmnet_vnd_dellink(mux_id, port);
	netdev_upper_dev_unlink(dev, real_dev);
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}

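/* Lower-device walk callback used by rmnet_force_unassociate_device(): tears
 * down one rmnet device and queues it on the caller's unregister list.
 */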
static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
{
	struct rmnet_walk_data *d = data;
	u8 mux_id;

	mux_id = rmnet_vnd_get_mux(rmnet_dev);

	rmnet_vnd_dellink(mux_id, d->port);
	netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
	unregister_netdevice_queue(rmnet_dev, d->head);

	return 0;
}

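/* Tears down every rmnet device attached to @dev when the real device is
 * being unregistered out from under us. The rmnet devices are collected on
 * a list and freed with unregister_netdevice_many() so the teardown happens
 * in one rtnl-held context.
 */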
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct rmnet_walk_data d;
	struct rmnet_port *port;
	LIST_HEAD(list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	d.real_dev = real_dev;
	d.head = &list;

	port = rmnet_get_port_rtnl(dev);
	d.port = port;

	rcu_read_lock();
	netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
	rcu_read_unlock();
	unregister_netdevice_many(&list);

	rmnet_unregister_real_device(real_dev, port);
}

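/* Netdevice notifier: cleans up any attached rmnet devices when a real
 * device goes away via NETDEV_UNREGISTER.
 */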
static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};

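/* Validates the netlink attributes for a new rmnet link: a mux ID must be
 * supplied in IFLA_VLAN_ID and must be below RMNET_MAX_LOGICAL_EP.
 */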
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	u16 mux_id;

	if (!data || !data[IFLA_VLAN_ID])
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
		return -ERANGE;

	return 0;
}

static size_t rmnet_get_size(const struct net_device *dev)
{
	return nla_total_size(2); /* IFLA_VLAN_ID */
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_VLAN_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
}

module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");