/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other dependent components which may still use these
	 * structures.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}
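
/* The locking rule stated at the top of this file is what the loop above
 * relies on: readers of batadv_hardif_list only need rcu_read_lock(), while
 * writers must hold rtnl_lock() around the list manipulation itself. As an
 * illustrative sketch (not the actual hard-interface.c code, which runs with
 * the rtnl lock already held by the netdevice notifier), a writer would look
 * roughly like this:
 *
 *	rtnl_lock();
 *	list_add_tail_rcu(&new_hard_iface->list, &batadv_hardif_list);
 *	rtnl_unlock();
 */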

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 *  function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset, sizeof(*vhdr),
					  &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
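
/* Worked example for the mapping above: an IPv4 frame carrying DSCP EF
 * (dsfield 0xb8) gives prio = (0xb8 & 0xfc) >> 5 = 5, so skb->priority
 * becomes 261; a VLAN tag with PCP 6 in its TCI gives prio = 6 and thus
 * skb->priority 262. Both results land in the 256-263 range checked at
 * the top of the function.
 */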

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case, as the packet was most
	 * probably dropped for reasons internal to the routing logic.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
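
/* Illustrative sketch of how the receive path above gets attached to a hard
 * interface. This is not the verbatim hard-interface.c code; it only shows
 * the packet_type wiring implied by the container_of() call in
 * batadv_batman_skb_recv().
 */
static void __maybe_unused
batadv_example_attach_rx(struct batadv_hard_iface *hard_iface)
{
	/* deliver every frame with the batman ethertype seen on this
	 * device to batadv_batman_skb_recv()
	 */
	hard_iface->batman_adv_ptype.type = htons(ETH_P_BATMAN);
	hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
	dev_add_pack(&hard_iface->batman_adv_ptype);
}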

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}

int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}

void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
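
/* Hypothetical usage sketch for the two helpers above: a component claims a
 * packet type at init time and releases it again on shutdown. The packet
 * type value 0xfa and both function names are made up for illustration and
 * do not exist in batman-adv.
 */
static int batadv_example_recv(struct sk_buff *skb,
			       struct batadv_hard_iface *recv_if)
{
	/* returning NET_RX_DROP tells batadv_batman_skb_recv() that the skb
	 * was neither consumed nor reused, so the caller frees it
	 */
	return NET_RX_DROP;
}

static int __maybe_unused batadv_example_handler_setup(void)
{
	/* fails with -EBUSY if packet type 0xfa is already claimed */
	return batadv_recv_handler_register(0xfa, batadv_example_recv);
}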

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}
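
/* Sketch of the expected caller side of batadv_algo_register(), modelled on
 * what the BATMAN_IV module does at init time; every name below is
 * illustrative only:
 *
 *	static struct batadv_algo_ops batadv_example_algo = {
 *		.name			= "BATMAN_EXAMPLE",
 *		.bat_iface_enable	= batadv_example_iface_enable,
 *		.bat_iface_disable	= batadv_example_iface_disable,
 *		.bat_iface_update_mac	= batadv_example_iface_update_mac,
 *		.bat_primary_iface_set	= batadv_example_primary_iface_set,
 *		.bat_ogm_schedule	= batadv_example_ogm_schedule,
 *		.bat_ogm_emit		= batadv_example_ogm_emit,
 *	};
 *
 *	ret = batadv_algo_register(&batadv_example_algo);
 *
 * Registration fails with -EINVAL if any of the ops checked above is missing
 * and with -EEXIST if the name is already taken. A soft interface later picks
 * a registered algorithm by name via batadv_algo_select(bat_priv, name).
 */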

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 *  the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
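
/* Hypothetical helper built on top of batadv_skb_crc32(): checksum everything
 * that follows a header of hdr_len bytes. Both the helper and its hdr_len
 * parameter are illustrative; the pskb_may_pull() call merely ensures the
 * skipped header really sits in the linear head buffer, as the kernel-doc
 * above requires for payload_ptr.
 */
static __be32 __maybe_unused
batadv_example_payload_crc32(struct sk_buff *skb, unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return 0;

	return batadv_skb_crc32(skb, skb->data + hdr_len);
}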

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
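
/* With the 0644 permission above the parameter is also writable at runtime,
 * typically as /sys/module/batman_adv/parameters/routing_algo (path assumed
 * from the usual module name mangling), e.g.:
 *
 *	echo BATMAN_IV > /sys/module/batman_adv/parameters/routing_algo
 *
 * batadv_param_set_ra() strips the trailing newline from such a write and
 * rejects any name for which no routing algorithm has been registered.
 */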
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);