xref: /openbmc/linux/net/batman-adv/originator.c (revision 33a03aad)
/*
 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"

static void purge_orig(struct work_struct *work);

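/* (re)arm the periodic originator purge: purge_orig() will run on the
 * batman event workqueue once the 1000ms delay has expired
 */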
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue,
			   &bat_priv->orig_work, msecs_to_jiffies(1000));
}

/* returns 1 if they are the same originator */
static int compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct orig_node, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

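/* set up the originator hash and kick off the purge timer - returns 1 on
 * success (or if the hash already exists) and 0 on allocation failure
 */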
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	start_purge_timer(bat_priv);
	return 1;

err:
	return 0;
}

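/* drop one reference to the neighbor - frees it via kfree_rcu() once the
 * refcount hits zero, so readers inside an rcu section stay safe
 */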
void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* returns the router towards this originator and increases its refcount -
 * the caller must hand the reference back via neigh_node_free_ref()
 */
struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
{
	struct neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
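
/* typical caller pattern (sketch - error handling is up to the call site):
 *
 *	router = orig_node_get_router(orig_node);
 *	if (!router)
 *		goto out;
 *	...use router outside of any rcu section...
 *	neigh_node_free_ref(router);
 */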
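/* allocate and initialise a neighbor - the refcount starts at 2: one
 * reference for the caller, one for the neighbor list the caller is
 * expected to attach it to
 */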
struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
					 const uint8_t *neigh_addr,
					 uint32_t seqno)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new neighbor %pM, initial seqno %d\n",
		neigh_addr, seqno);

out:
	return neigh_node;
}

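/* rcu callback that tears down an originator once the grace period has
 * passed: all bonding and neighbor references are dropped before the
 * fragment list, translation table entries and buffers are freed
 */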
static void orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(rcu, struct orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	tt_global_del_orig(orig_node->bat_priv, orig_node,
			   "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

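/* drop one reference to the originator - the final put defers the actual
 * free to orig_node_free_rcu() via call_rcu()
 *
 * typical find/put pattern (sketch):
 *
 *	orig_node = orig_hash_find(bat_priv, addr);
 *	if (!orig_node)
 *		return;
 *	...
 *	orig_node_free_ref(orig_node);
 */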
void orig_node_free_ref(struct orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, orig_node_free_rcu);
}

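/* tear down the whole originator hash on shutdown: stop the purge worker
 * first, then unlink and release every orig_node before destroying the hash
 */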
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist - the caller must put the returned
 * reference with orig_node_free_ref() */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->tt_poss_change = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	orig_node->bcast_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
					- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;

free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

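/* removes all neighbors of this originator that have either timed out or
 * sit on an interface that is inactive or about to be removed - returns
 * true if at least one neighbor was purged and reports the best remaining
 * neighbor (highest tq_avg) through best_neigh_node
 */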
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			last_seen = neigh_node->last_seen;

			if ((neigh_node->if_incoming->if_status ==
								IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					orig_node->orig, neigh_node->addr,
					jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

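/* returns true if the whole originator has expired (not seen for twice the
 * purge timeout) and should be removed - otherwise stale neighbors are
 * purged and the route is switched to the best remaining neighbor
 */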
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_seen %u\n",
			orig_node->orig,
			jiffies_to_msecs(orig_node->last_seen));
		return true;
	}

	if (purge_orig_neighbors(bat_priv, orig_node, &best_neigh_node))
		update_route(bat_priv, orig_node, best_neigh_node);

	return false;
}

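/* walks the complete originator hash, deletes originators that have expired
 * (including their gateway entry, if any), frees stale fragment chains and
 * finally re-runs the gateway election
 */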
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				orig_node_free_ref(orig_node);
				continue;
			}

			if (has_timed_out(orig_node->last_frag_packet,
					  FRAG_TIMEOUT))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	gw_node_purge(bat_priv);
	gw_election(bat_priv);
}

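/* periodic worker: purge the originator table and re-arm the timer so the
 * purge keeps running once per second
 */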
static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

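/* seq_file handler for the "originators" table (exposed via debugfs):
 * prints one line per originator with its currently selected router and
 * all potential next hops
 */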
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hard_iface *primary_if;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);

	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_seen) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_seen) % 1000;

			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

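/* grows the per-interface bcast_own and bcast_own_sum arrays by one slot so
 * they can hold max_if_num interfaces - returns 0 on success, -1 on OOM
 */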
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

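/* called when a hard interface is added: every orig_node's OGM bookkeeping
 * has to be resized under its ogm_cnt_lock to make room for the new slot
 */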
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

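/* shrinks the per-interface arrays after interface del_if_num has gone
 * away: the slots before and after it are copied into a smaller buffer -
 * max_if_num is the number of interfaces remaining after the removal
 */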
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part - bcast_own is an unsigned long array, so cast to
	 * char * before applying the byte offset */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       (char *)orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -1;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

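/* called when a hard interface goes away: shrink every orig_node's arrays
 * and renumber the remaining interfaces on the same soft interface so that
 * if_num values stay contiguous
 */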
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hard_iface *hard_iface_tmp;
	struct orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
645