xref: /openbmc/linux/net/batman-adv/main.c (revision cdfce539)
1 /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA
18  */
19 
#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"
39 
40 
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
/* one receive handler slot per possible packet_type byte; unused slots
 * point at batadv_recv_unhandled_packet (see batadv_recv_handler_init())
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
/* name of the routing algorithm selected via the "routing_algo" module
 * parameter; defaults to B.A.T.M.A.N. IV
 */
char batadv_routing_algo[20] = "BATMAN_IV";
/* all routing algorithms registered via batadv_algo_register() */
static struct hlist_head batadv_algo_list;

/* Ethernet broadcast address used as destination for broadcast packets */
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue used for all batman-adv delayed work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
55 
56 static int __init batadv_init(void)
57 {
58 	INIT_LIST_HEAD(&batadv_hardif_list);
59 	INIT_HLIST_HEAD(&batadv_algo_list);
60 
61 	batadv_recv_handler_init();
62 
63 	batadv_iv_init();
64 
65 	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
66 
67 	if (!batadv_event_workqueue)
68 		return -ENOMEM;
69 
70 	batadv_socket_init();
71 	batadv_debugfs_init();
72 
73 	register_netdevice_notifier(&batadv_hard_if_notifier);
74 	rtnl_link_register(&batadv_link_ops);
75 
76 	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
77 		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
78 
79 	return 0;
80 }
81 
/* module exit: undo everything batadv_init() set up, in reverse order */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	/* drain all pending work before destroying the queue */
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for outstanding RCU callbacks so they don't run after the
	 * module text/data is gone
	 */
	rcu_barrier();
}
95 
/**
 * batadv_mesh_init - set up all per-mesh state of a soft interface
 * @soft_iface: netdev whose batadv_priv gets initialized
 *
 * Initializes locks and lists, then brings up the originator,
 * translation table, vis, bridge loop avoidance, distributed ARP table
 * and network coding components and finally marks the mesh
 * BATADV_MESH_ACTIVE.
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially initialized state is torn down via batadv_mesh_free().
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the local translation
	 * table
	 */
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	/* batadv_mesh_free() copes with partially initialized state */
	batadv_mesh_free(soft_iface);
	return ret;
}
154 
/**
 * batadv_mesh_free - tear down all per-mesh state of a soft interface
 * @soft_iface: netdev whose batadv_priv gets released
 *
 * Counterpart to batadv_mesh_init(); also used as its error path, so it
 * must tolerate partially initialized state. Teardown order is
 * significant (see inline comments).
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
187 
188 /**
189  * batadv_is_my_mac - check if the given mac address belongs to any of the real
190  * interfaces in the current mesh
191  * @bat_priv: the bat priv with all the soft interface information
192  * @addr: the address to check
193  */
194 int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
195 {
196 	const struct batadv_hard_iface *hard_iface;
197 
198 	rcu_read_lock();
199 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
200 		if (hard_iface->if_status != BATADV_IF_ACTIVE)
201 			continue;
202 
203 		if (hard_iface->soft_iface != bat_priv->soft_iface)
204 			continue;
205 
206 		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
207 			rcu_read_unlock();
208 			return 1;
209 		}
210 	}
211 	rcu_read_unlock();
212 	return 0;
213 }
214 
215 /**
216  * batadv_seq_print_text_primary_if_get - called from debugfs table printing
217  *  function that requires the primary interface
218  * @seq: debugfs table seq_file struct
219  *
220  * Returns primary interface if found or NULL otherwise.
221  */
222 struct batadv_hard_iface *
223 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
224 {
225 	struct net_device *net_dev = (struct net_device *)seq->private;
226 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
227 	struct batadv_hard_iface *primary_if;
228 
229 	primary_if = batadv_primary_if_get_selected(bat_priv);
230 
231 	if (!primary_if) {
232 		seq_printf(seq,
233 			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
234 			   net_dev->name);
235 		goto out;
236 	}
237 
238 	if (primary_if->if_status == BATADV_IF_ACTIVE)
239 		goto out;
240 
241 	seq_printf(seq,
242 		   "BATMAN mesh %s disabled - primary interface not active\n",
243 		   net_dev->name);
244 	batadv_hardif_free_ref(primary_if);
245 	primary_if = NULL;
246 
247 out:
248 	return primary_if;
249 }
250 
/* default receive handler: drops packets of any type no component has
 * registered a handler for (see batadv_rx_handler)
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
256 
/* incoming packets with the batman ethertype received on any active hard
 * interface; installed as the packet_type handler in
 * batadv_hardif_enable_interface(). Dispatches to the per-packet-type
 * handler from batadv_rx_handler[].
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* the hard interface owning this packet_type struct */
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	/* interface may have been detached from its mesh meanwhile */
	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 * idx is a uint8_t, so it can never index past the 256-entry
	 * batadv_rx_handler array.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
327 
328 static void batadv_recv_handler_init(void)
329 {
330 	int i;
331 
332 	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
333 		batadv_rx_handler[i] = batadv_recv_unhandled_packet;
334 
335 	/* batman icmp packet */
336 	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
337 	/* unicast with 4 addresses packet */
338 	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
339 	/* unicast packet */
340 	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
341 	/* fragmented unicast packet */
342 	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
343 	/* broadcast packet */
344 	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
345 	/* vis packet */
346 	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
347 	/* Translation table query (request or response) */
348 	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
349 	/* Roaming advertisement */
350 	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
351 }
352 
353 int
354 batadv_recv_handler_register(uint8_t packet_type,
355 			     int (*recv_handler)(struct sk_buff *,
356 						 struct batadv_hard_iface *))
357 {
358 	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
359 		return -EBUSY;
360 
361 	batadv_rx_handler[packet_type] = recv_handler;
362 	return 0;
363 }
364 
/* release the receive handler slot for @packet_type again, restoring the
 * drop-everything default handler
 */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
369 
370 static struct batadv_algo_ops *batadv_algo_get(char *name)
371 {
372 	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
373 
374 	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
375 		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
376 			continue;
377 
378 		bat_algo_ops = bat_algo_ops_tmp;
379 		break;
380 	}
381 
382 	return bat_algo_ops;
383 }
384 
385 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
386 {
387 	struct batadv_algo_ops *bat_algo_ops_tmp;
388 	int ret;
389 
390 	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
391 	if (bat_algo_ops_tmp) {
392 		pr_info("Trying to register already registered routing algorithm: %s\n",
393 			bat_algo_ops->name);
394 		ret = -EEXIST;
395 		goto out;
396 	}
397 
398 	/* all algorithms must implement all ops (for now) */
399 	if (!bat_algo_ops->bat_iface_enable ||
400 	    !bat_algo_ops->bat_iface_disable ||
401 	    !bat_algo_ops->bat_iface_update_mac ||
402 	    !bat_algo_ops->bat_primary_iface_set ||
403 	    !bat_algo_ops->bat_ogm_schedule ||
404 	    !bat_algo_ops->bat_ogm_emit) {
405 		pr_info("Routing algo '%s' does not implement required ops\n",
406 			bat_algo_ops->name);
407 		ret = -EINVAL;
408 		goto out;
409 	}
410 
411 	INIT_HLIST_NODE(&bat_algo_ops->list);
412 	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
413 	ret = 0;
414 
415 out:
416 	return ret;
417 }
418 
419 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
420 {
421 	struct batadv_algo_ops *bat_algo_ops;
422 	int ret = -EINVAL;
423 
424 	bat_algo_ops = batadv_algo_get(name);
425 	if (!bat_algo_ops)
426 		goto out;
427 
428 	bat_priv->bat_algo_ops = bat_algo_ops;
429 	ret = 0;
430 
431 out:
432 	return ret;
433 }
434 
435 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
436 {
437 	struct batadv_algo_ops *bat_algo_ops;
438 
439 	seq_puts(seq, "Available routing algorithms:\n");
440 
441 	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
442 		seq_printf(seq, "%s\n", bat_algo_ops->name);
443 	}
444 
445 	return 0;
446 }
447 
448 /**
449  * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
450  *  the header
451  * @skb: skb pointing to fragmented socket buffers
452  * @payload_ptr: Pointer to position inside the head buffer of the skb
453  *  marking the start of the data to be CRC'ed
454  *
455  * payload_ptr must always point to an address in the skb head buffer and not to
456  * a fragment.
457  */
458 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
459 {
460 	u32 crc = 0;
461 	unsigned int from;
462 	unsigned int to = skb->len;
463 	struct skb_seq_state st;
464 	const u8 *data;
465 	unsigned int len;
466 	unsigned int consumed = 0;
467 
468 	from = (unsigned int)(payload_ptr - skb->data);
469 
470 	skb_prepare_seq_read(skb, from, to, &st);
471 	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
472 		crc = crc32c(crc, data, len);
473 		consumed += len;
474 	}
475 	skb_abort_seq_read(&st);
476 
477 	return htonl(crc);
478 }
479 
480 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
481 {
482 	struct batadv_algo_ops *bat_algo_ops;
483 	char *algo_name = (char *)val;
484 	size_t name_len = strlen(algo_name);
485 
486 	if (name_len > 0 && algo_name[name_len - 1] == '\n')
487 		algo_name[name_len - 1] = '\0';
488 
489 	bat_algo_ops = batadv_algo_get(algo_name);
490 	if (!bat_algo_ops) {
491 		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
492 		return -EINVAL;
493 	}
494 
495 	return param_set_copystring(algo_name, kp);
496 }
497 
/* hooks for the "routing_algo" module parameter: setting it runs
 * batadv_param_set_ra() so only registered algorithm names are accepted
 */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* backing storage for the parameter string shown by param_get_string */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);
519