xref: /openbmc/linux/net/batman-adv/originator.c (revision 37be287c)
1 /* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include "main.h"
19 #include "distributed-arp-table.h"
20 #include "originator.h"
21 #include "hash.h"
22 #include "translation-table.h"
23 #include "routing.h"
24 #include "gateway_client.h"
25 #include "hard-interface.h"
26 #include "soft-interface.h"
27 #include "bridge_loop_avoidance.h"
28 #include "network-coding.h"
29 #include "fragmentation.h"
30 
31 /* hash class keys */
32 static struct lock_class_key batadv_orig_hash_lock_class_key;
33 
34 static void batadv_purge_orig(struct work_struct *work);
35 
/* returns 1 if they are the same originator: compares the originator MAC
 * address stored at the beginning of the orig_node that embeds hash_entry
 */
37 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
38 {
39 	const void *data1 = container_of(node, struct batadv_orig_node,
40 					 hash_entry);
41 
42 	return batadv_compare_eth(data1, data2);
43 }
44 
45 /**
46  * batadv_orig_node_vlan_get - get an orig_node_vlan object
47  * @orig_node: the originator serving the VLAN
48  * @vid: the VLAN identifier
49  *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist. The object is returned with refcounter increased by 1.
52  */
53 struct batadv_orig_node_vlan *
54 batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
55 			  unsigned short vid)
56 {
57 	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
58 
59 	rcu_read_lock();
60 	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
61 		if (tmp->vid != vid)
62 			continue;
63 
64 		if (!atomic_inc_not_zero(&tmp->refcount))
65 			continue;
66 
67 		vlan = tmp;
68 
69 		break;
70 	}
71 	rcu_read_unlock();
72 
73 	return vlan;
74 }
75 
76 /**
77  * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
78  *  object
79  * @orig_node: the originator serving the VLAN
80  * @vid: the VLAN identifier
81  *
82  * Returns NULL in case of failure or the vlan object identified by vid and
83  * belonging to orig_node otherwise. The object is created and added to the list
84  * if it does not exist.
85  *
86  * The object is returned with refcounter increased by 1.
87  */
88 struct batadv_orig_node_vlan *
89 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
90 			  unsigned short vid)
91 {
92 	struct batadv_orig_node_vlan *vlan;
93 
94 	spin_lock_bh(&orig_node->vlan_list_lock);
95 
96 	/* first look if an object for this vid already exists */
97 	vlan = batadv_orig_node_vlan_get(orig_node, vid);
98 	if (vlan)
99 		goto out;
100 
101 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
102 	if (!vlan)
103 		goto out;
104 
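	/* one reference for the vlan_list entry, one for the caller */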
105 	atomic_set(&vlan->refcount, 2);
106 	vlan->vid = vid;
107 
108 	list_add_rcu(&vlan->list, &orig_node->vlan_list);
109 
110 out:
111 	spin_unlock_bh(&orig_node->vlan_list_lock);
112 
113 	return vlan;
114 }
115 
116 /**
117  * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
118  *  the originator-vlan object
119  * @orig_vlan: the originator-vlan object to release
120  */
121 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
122 {
123 	if (atomic_dec_and_test(&orig_vlan->refcount))
124 		kfree_rcu(orig_vlan, rcu);
125 }
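
/* Usage sketch (hypothetical caller) for the reference handling above:
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		... read or update per-VLAN originator state ...
 *		batadv_orig_node_vlan_free_ref(vlan);
 *	}
 *
 * batadv_orig_node_vlan_new() follows the same get/put pattern, but creates
 * the object under vlan_list_lock when no entry for vid exists yet.
 */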
126 
127 int batadv_originator_init(struct batadv_priv *bat_priv)
128 {
129 	if (bat_priv->orig_hash)
130 		return 0;
131 
132 	bat_priv->orig_hash = batadv_hash_new(1024);
133 
134 	if (!bat_priv->orig_hash)
135 		goto err;
136 
137 	batadv_hash_set_lock_class(bat_priv->orig_hash,
138 				   &batadv_orig_hash_lock_class_key);
139 
140 	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
141 	queue_delayed_work(batadv_event_workqueue,
142 			   &bat_priv->orig_work,
143 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
144 
145 	return 0;
146 
147 err:
148 	return -ENOMEM;
149 }
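
/* Besides allocating the 1024-bucket originator hash, batadv_originator_init()
 * arms the periodic purge worker; batadv_purge_orig() re-queues itself, so the
 * purge keeps running until batadv_originator_free() cancels it.
 */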
150 
151 /**
152  * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
153  * @rcu: rcu pointer of the neigh_ifinfo object
154  */
155 static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
156 {
157 	struct batadv_neigh_ifinfo *neigh_ifinfo;
158 
159 	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
160 
161 	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
162 		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
163 
164 	kfree(neigh_ifinfo);
165 }
166 
167 /**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly free
169  *  the neigh_ifinfo (without rcu callback)
170  * @neigh_ifinfo: the neigh_ifinfo object to release
171  */
172 static void
173 batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
174 {
175 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
176 		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
177 }
178 
179 /**
180  * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
181  *  the neigh_ifinfo
182  * @neigh_ifinfo: the neigh_ifinfo object to release
183  */
184 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
185 {
186 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
187 		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
188 }
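
/* Note on the *_free_ref() vs. *_free_ref_now() pairs in this file: the
 * former defer the final kfree() to an RCU callback via call_rcu(), while the
 * latter free immediately and are therefore only used from contexts where no
 * RCU reader can still see the object, e.g. from within another object's RCU
 * callback such as batadv_neigh_node_free_rcu() below.
 */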
189 
190 /**
191  * batadv_neigh_node_free_rcu - free the neigh_node
192  * @rcu: rcu pointer of the neigh_node
193  */
194 static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
195 {
196 	struct hlist_node *node_tmp;
197 	struct batadv_neigh_node *neigh_node;
198 	struct batadv_neigh_ifinfo *neigh_ifinfo;
199 
200 	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
201 
202 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
203 				  &neigh_node->ifinfo_list, list) {
204 		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
205 	}
206 	batadv_hardif_free_ref_now(neigh_node->if_incoming);
207 
208 	kfree(neigh_node);
209 }
210 
211 /**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: neighbor node to free
215  */
216 static void
217 batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
218 {
219 	if (atomic_dec_and_test(&neigh_node->refcount))
220 		batadv_neigh_node_free_rcu(&neigh_node->rcu);
221 }
222 
223 /**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
 *  and possibly free it
 * @neigh_node: neighbor node to free
227  */
228 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
229 {
230 	if (atomic_dec_and_test(&neigh_node->refcount))
231 		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
232 }
233 
234 /**
 * batadv_orig_router_get - router to the originator depending on iface
236  * @orig_node: the orig node for the router
237  * @if_outgoing: the interface where the payload packet has been received or
238  *  the OGM should be sent to
239  *
240  * Returns the neighbor which should be router for this orig_node/iface.
241  *
242  * The object is returned with refcounter increased by 1.
243  */
244 struct batadv_neigh_node *
245 batadv_orig_router_get(struct batadv_orig_node *orig_node,
246 		       const struct batadv_hard_iface *if_outgoing)
247 {
248 	struct batadv_orig_ifinfo *orig_ifinfo;
249 	struct batadv_neigh_node *router = NULL;
250 
251 	rcu_read_lock();
252 	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
253 		if (orig_ifinfo->if_outgoing != if_outgoing)
254 			continue;
255 
256 		router = rcu_dereference(orig_ifinfo->router);
257 		break;
258 	}
259 
260 	if (router && !atomic_inc_not_zero(&router->refcount))
261 		router = NULL;
262 
263 	rcu_read_unlock();
264 	return router;
265 }
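
/* Typical usage (hypothetical caller):
 *
 *	router = batadv_orig_router_get(orig_node, recv_if);
 *	if (!router)
 *		goto out;
 *	... forward the packet towards router->addr via router->if_incoming ...
 *	batadv_neigh_node_free_ref(router);
 */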
266 
267 /**
268  * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
269  * @orig_node: the orig node to be queried
270  * @if_outgoing: the interface for which the ifinfo should be acquired
271  *
272  * Returns the requested orig_ifinfo or NULL if not found.
273  *
274  * The object is returned with refcounter increased by 1.
275  */
276 struct batadv_orig_ifinfo *
277 batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
278 		       struct batadv_hard_iface *if_outgoing)
279 {
280 	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
281 
282 	rcu_read_lock();
283 	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
284 				 list) {
285 		if (tmp->if_outgoing != if_outgoing)
286 			continue;
287 
288 		if (!atomic_inc_not_zero(&tmp->refcount))
289 			continue;
290 
291 		orig_ifinfo = tmp;
292 		break;
293 	}
294 	rcu_read_unlock();
295 
296 	return orig_ifinfo;
297 }
298 
299 /**
300  * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
301  * @orig_node: the orig node to be queried
302  * @if_outgoing: the interface for which the ifinfo should be acquired
303  *
304  * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
305  * interface otherwise. The object is created and added to the list
306  * if it does not exist.
307  *
308  * The object is returned with refcounter increased by 1.
309  */
310 struct batadv_orig_ifinfo *
311 batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
312 		       struct batadv_hard_iface *if_outgoing)
313 {
314 	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
315 	unsigned long reset_time;
316 
317 	spin_lock_bh(&orig_node->neigh_list_lock);
318 
319 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
320 	if (orig_ifinfo)
321 		goto out;
322 
323 	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
324 	if (!orig_ifinfo)
325 		goto out;
326 
327 	if (if_outgoing != BATADV_IF_DEFAULT &&
328 	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
329 		kfree(orig_ifinfo);
330 		orig_ifinfo = NULL;
331 		goto out;
332 	}
333 
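	/* initialise the seqno reset timestamp so that the reset protection
	 * window (BATADV_RESET_PROTECTION_MS) has already expired for a newly
	 * created ifinfo entry
	 */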
334 	reset_time = jiffies - 1;
335 	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
336 	orig_ifinfo->batman_seqno_reset = reset_time;
337 	orig_ifinfo->if_outgoing = if_outgoing;
338 	INIT_HLIST_NODE(&orig_ifinfo->list);
339 	atomic_set(&orig_ifinfo->refcount, 2);
340 	hlist_add_head_rcu(&orig_ifinfo->list,
341 			   &orig_node->ifinfo_list);
342 out:
343 	spin_unlock_bh(&orig_node->neigh_list_lock);
344 	return orig_ifinfo;
345 }
346 
347 /**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
350  * @if_outgoing: the interface for which the ifinfo should be acquired
351  *
352  * The object is returned with refcounter increased by 1.
353  *
354  * Returns the requested neigh_ifinfo or NULL if not found
355  */
356 struct batadv_neigh_ifinfo *
357 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
358 			struct batadv_hard_iface *if_outgoing)
359 {
360 	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
361 				   *tmp_neigh_ifinfo;
362 
363 	rcu_read_lock();
364 	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
365 				 list) {
366 		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
367 			continue;
368 
369 		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
370 			continue;
371 
372 		neigh_ifinfo = tmp_neigh_ifinfo;
373 		break;
374 	}
375 	rcu_read_unlock();
376 
377 	return neigh_ifinfo;
378 }
379 
380 /**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
383  * @if_outgoing: the interface for which the ifinfo should be acquired
384  *
385  * Returns NULL in case of failure or the neigh_ifinfo object for the
386  * if_outgoing interface otherwise. The object is created and added to the list
387  * if it does not exist.
388  *
389  * The object is returned with refcounter increased by 1.
390  */
391 struct batadv_neigh_ifinfo *
392 batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
393 			struct batadv_hard_iface *if_outgoing)
394 {
395 	struct batadv_neigh_ifinfo *neigh_ifinfo;
396 
397 	spin_lock_bh(&neigh->ifinfo_lock);
398 
399 	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
400 	if (neigh_ifinfo)
401 		goto out;
402 
403 	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
404 	if (!neigh_ifinfo)
405 		goto out;
406 
407 	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
408 		kfree(neigh_ifinfo);
409 		neigh_ifinfo = NULL;
410 		goto out;
411 	}
412 
413 	INIT_HLIST_NODE(&neigh_ifinfo->list);
414 	atomic_set(&neigh_ifinfo->refcount, 2);
415 	neigh_ifinfo->if_outgoing = if_outgoing;
416 
417 	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
418 
419 out:
420 	spin_unlock_bh(&neigh->ifinfo_lock);
421 
422 	return neigh_ifinfo;
423 }
424 
425 /**
426  * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface the neighbour is connected to
428  * @neigh_addr: the mac address of the neighbour interface
429  * @orig_node: originator object representing the neighbour
430  *
431  * Allocates a new neigh_node object and initialises all the generic fields.
432  * Returns the new object or NULL on failure.
433  */
434 struct batadv_neigh_node *
435 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
436 		      const uint8_t *neigh_addr,
437 		      struct batadv_orig_node *orig_node)
438 {
439 	struct batadv_neigh_node *neigh_node;
440 
441 	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
442 	if (!neigh_node)
443 		goto out;
444 
445 	INIT_HLIST_NODE(&neigh_node->list);
446 	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
447 	spin_lock_init(&neigh_node->ifinfo_lock);
448 
449 	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
450 	neigh_node->if_incoming = hard_iface;
451 	neigh_node->orig_node = orig_node;
452 
453 	/* extra reference for return */
454 	atomic_set(&neigh_node->refcount, 2);
455 
456 out:
457 	return neigh_node;
458 }
459 
460 /**
461  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
462  * @rcu: rcu pointer of the orig_ifinfo object
463  */
464 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
465 {
466 	struct batadv_orig_ifinfo *orig_ifinfo;
467 
468 	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
469 
470 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
471 		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
472 
473 	kfree(orig_ifinfo);
474 }
475 
476 /**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly free
478  *  the orig_ifinfo (without rcu callback)
479  * @orig_ifinfo: the orig_ifinfo object to release
480  */
481 static void
482 batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
483 {
484 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
485 		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
486 }
487 
488 /**
489  * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
490  *  the orig_ifinfo
491  * @orig_ifinfo: the orig_ifinfo object to release
492  */
493 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
494 {
495 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
496 		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
497 }
498 
499 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
500 {
501 	struct hlist_node *node_tmp;
502 	struct batadv_neigh_node *neigh_node;
503 	struct batadv_orig_node *orig_node;
504 	struct batadv_orig_ifinfo *orig_ifinfo;
505 
506 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
507 
508 	spin_lock_bh(&orig_node->neigh_list_lock);
509 
510 	/* for all neighbors towards this originator ... */
511 	hlist_for_each_entry_safe(neigh_node, node_tmp,
512 				  &orig_node->neigh_list, list) {
513 		hlist_del_rcu(&neigh_node->list);
514 		batadv_neigh_node_free_ref_now(neigh_node);
515 	}
516 
517 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
518 				  &orig_node->ifinfo_list, list) {
519 		hlist_del_rcu(&orig_ifinfo->list);
520 		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
521 	}
522 	spin_unlock_bh(&orig_node->neigh_list_lock);
523 
524 	/* Free nc_nodes */
525 	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
526 
527 	batadv_frag_purge_orig(orig_node, NULL);
528 
529 	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
530 				  "originator timed out");
531 
532 	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
533 		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
534 
535 	kfree(orig_node->tt_buff);
536 	kfree(orig_node);
537 }
538 
539 /**
540  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
541  * schedule an rcu callback for freeing it
542  * @orig_node: the orig node to free
543  */
544 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
545 {
546 	if (atomic_dec_and_test(&orig_node->refcount))
547 		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
548 }
549 
550 /**
551  * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
552  * possibly free it (without rcu callback)
553  * @orig_node: the orig node to free
554  */
555 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
556 {
557 	if (atomic_dec_and_test(&orig_node->refcount))
558 		batadv_orig_node_free_rcu(&orig_node->rcu);
559 }
560 
561 void batadv_originator_free(struct batadv_priv *bat_priv)
562 {
563 	struct batadv_hashtable *hash = bat_priv->orig_hash;
564 	struct hlist_node *node_tmp;
565 	struct hlist_head *head;
566 	spinlock_t *list_lock; /* spinlock to protect write access */
567 	struct batadv_orig_node *orig_node;
568 	uint32_t i;
569 
570 	if (!hash)
571 		return;
572 
573 	cancel_delayed_work_sync(&bat_priv->orig_work);
574 
575 	bat_priv->orig_hash = NULL;
576 
577 	for (i = 0; i < hash->size; i++) {
578 		head = &hash->table[i];
579 		list_lock = &hash->list_locks[i];
580 
581 		spin_lock_bh(list_lock);
582 		hlist_for_each_entry_safe(orig_node, node_tmp,
583 					  head, hash_entry) {
584 			hlist_del_rcu(&orig_node->hash_entry);
585 			batadv_orig_node_free_ref(orig_node);
586 		}
587 		spin_unlock_bh(list_lock);
588 	}
589 
590 	batadv_hash_destroy(hash);
591 }
592 
593 /**
594  * batadv_orig_node_new - creates a new orig_node
595  * @bat_priv: the bat priv with all the soft interface information
596  * @addr: the mac address of the originator
597  *
 * Creates a new originator object and initialises all the generic fields.
599  * The new object is not added to the originator list.
600  * Returns the newly created object or NULL on failure.
601  */
602 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
603 					      const uint8_t *addr)
604 {
605 	struct batadv_orig_node *orig_node;
606 	struct batadv_orig_node_vlan *vlan;
607 	unsigned long reset_time;
608 	int i;
609 
610 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
611 		   "Creating new originator: %pM\n", addr);
612 
613 	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
614 	if (!orig_node)
615 		return NULL;
616 
617 	INIT_HLIST_HEAD(&orig_node->neigh_list);
618 	INIT_LIST_HEAD(&orig_node->vlan_list);
619 	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
620 	spin_lock_init(&orig_node->bcast_seqno_lock);
621 	spin_lock_init(&orig_node->neigh_list_lock);
622 	spin_lock_init(&orig_node->tt_buff_lock);
623 	spin_lock_init(&orig_node->tt_lock);
624 	spin_lock_init(&orig_node->vlan_list_lock);
625 
626 	batadv_nc_init_orig(orig_node);
627 
628 	/* extra reference for return */
629 	atomic_set(&orig_node->refcount, 2);
630 
631 	orig_node->tt_initialised = false;
632 	orig_node->bat_priv = bat_priv;
633 	memcpy(orig_node->orig, addr, ETH_ALEN);
634 	batadv_dat_init_orig_node_addr(orig_node);
635 	atomic_set(&orig_node->last_ttvn, 0);
636 	orig_node->tt_buff = NULL;
637 	orig_node->tt_buff_len = 0;
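	/* as in batadv_orig_ifinfo_new(): start with the broadcast seqno reset
	 * protection window already expired
	 */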
638 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
639 	orig_node->bcast_seqno_reset = reset_time;
640 
641 	/* create a vlan object for the "untagged" LAN */
642 	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
643 	if (!vlan)
644 		goto free_orig_node;
645 	/* batadv_orig_node_vlan_new() increases the refcounter.
646 	 * Immediately release vlan since it is not needed anymore in this
647 	 * context
648 	 */
649 	batadv_orig_node_vlan_free_ref(vlan);
650 
651 	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
652 		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
653 		spin_lock_init(&orig_node->fragments[i].lock);
654 		orig_node->fragments[i].size = 0;
655 	}
656 
657 	return orig_node;
658 free_orig_node:
659 	kfree(orig_node);
660 	return NULL;
661 }
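
/* A hypothetical caller (e.g. a routing algorithm's originator lookup) is
 * expected to insert the new node into the originator hash itself, roughly:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
 *				     batadv_choose_orig, orig_node,
 *				     &orig_node->hash_entry);
 *	if (hash_added != 0) {
 *		batadv_orig_node_free_ref(orig_node);
 *		return NULL;
 *	}
 */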
662 
663 /**
664  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
665  * @bat_priv: the bat priv with all the soft interface information
666  * @orig_node: orig node which is to be checked
667  *
668  * Returns true if any ifinfo entry was purged, false otherwise.
669  */
670 static bool
671 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
672 			 struct batadv_orig_node *orig_node)
673 {
674 	struct batadv_orig_ifinfo *orig_ifinfo;
675 	struct batadv_hard_iface *if_outgoing;
676 	struct hlist_node *node_tmp;
677 	bool ifinfo_purged = false;
678 
679 	spin_lock_bh(&orig_node->neigh_list_lock);
680 
681 	/* for all ifinfo objects for this originator */
682 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
683 				  &orig_node->ifinfo_list, list) {
684 		if_outgoing = orig_ifinfo->if_outgoing;
685 
686 		/* always keep the default interface */
687 		if (if_outgoing == BATADV_IF_DEFAULT)
688 			continue;
689 
690 		/* don't purge if the interface is not (going) down */
691 		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
692 		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
693 		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
694 			continue;
695 
696 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
697 			   "router/ifinfo purge: originator %pM, iface: %s\n",
698 			   orig_node->orig, if_outgoing->net_dev->name);
699 
700 		ifinfo_purged = true;
701 
702 		hlist_del_rcu(&orig_ifinfo->list);
703 		batadv_orig_ifinfo_free_ref(orig_ifinfo);
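		/* last_bonding_candidate holds an extra reference to the
		 * ifinfo object, drop it as well when the entry leaves the
		 * list
		 */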
704 		if (orig_node->last_bonding_candidate == orig_ifinfo) {
705 			orig_node->last_bonding_candidate = NULL;
706 			batadv_orig_ifinfo_free_ref(orig_ifinfo);
707 		}
708 	}
709 
710 	spin_unlock_bh(&orig_node->neigh_list_lock);
711 
712 	return ifinfo_purged;
713 }
714 
716 /**
717  * batadv_purge_orig_neighbors - purges neighbors from originator
718  * @bat_priv: the bat priv with all the soft interface information
719  * @orig_node: orig node which is to be checked
720  *
721  * Returns true if any neighbor was purged, false otherwise
722  */
723 static bool
724 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
725 			    struct batadv_orig_node *orig_node)
726 {
727 	struct hlist_node *node_tmp;
728 	struct batadv_neigh_node *neigh_node;
729 	bool neigh_purged = false;
730 	unsigned long last_seen;
731 	struct batadv_hard_iface *if_incoming;
732 
733 	spin_lock_bh(&orig_node->neigh_list_lock);
734 
735 	/* for all neighbors towards this originator ... */
736 	hlist_for_each_entry_safe(neigh_node, node_tmp,
737 				  &orig_node->neigh_list, list) {
738 		last_seen = neigh_node->last_seen;
739 		if_incoming = neigh_node->if_incoming;
740 
741 		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
742 		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
743 		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
744 		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
745 			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
746 			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
747 			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
748 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
749 					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
750 					   orig_node->orig, neigh_node->addr,
751 					   if_incoming->net_dev->name);
752 			else
753 				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
754 					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
755 					   orig_node->orig, neigh_node->addr,
756 					   jiffies_to_msecs(last_seen));
757 
758 			neigh_purged = true;
759 
760 			hlist_del_rcu(&neigh_node->list);
761 			batadv_neigh_node_free_ref(neigh_node);
762 		}
763 	}
764 
765 	spin_unlock_bh(&orig_node->neigh_list_lock);
766 	return neigh_purged;
767 }
768 
769 /**
770  * batadv_find_best_neighbor - finds the best neighbor after purging
771  * @bat_priv: the bat priv with all the soft interface information
772  * @orig_node: orig node which is to be checked
773  * @if_outgoing: the interface for which the metric should be compared
774  *
775  * Returns the current best neighbor, with refcount increased.
776  */
777 static struct batadv_neigh_node *
778 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
779 			  struct batadv_orig_node *orig_node,
780 			  struct batadv_hard_iface *if_outgoing)
781 {
782 	struct batadv_neigh_node *best = NULL, *neigh;
783 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
784 
785 	rcu_read_lock();
786 	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
787 		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
788 						best, if_outgoing) <= 0))
789 			continue;
790 
791 		if (!atomic_inc_not_zero(&neigh->refcount))
792 			continue;
793 
794 		if (best)
795 			batadv_neigh_node_free_ref(best);
796 
797 		best = neigh;
798 	}
799 	rcu_read_unlock();
800 
801 	return best;
802 }
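
/* bao->bat_neigh_cmp() returns a value > 0 when the first neighbor provides
 * the better metric towards if_outgoing, so the loop above ends up with the
 * best neighbor and hands a reference on it back to the caller.
 */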
803 
804 /**
805  * batadv_purge_orig_node - purges obsolete information from an orig_node
806  * @bat_priv: the bat priv with all the soft interface information
807  * @orig_node: orig node which is to be checked
808  *
809  * This function checks if the orig_node or substructures of it have become
810  * obsolete, and purges this information if that's the case.
811  *
812  * Returns true if the orig_node is to be removed, false otherwise.
813  */
814 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
815 				   struct batadv_orig_node *orig_node)
816 {
817 	struct batadv_neigh_node *best_neigh_node;
818 	struct batadv_hard_iface *hard_iface;
819 	bool changed;
820 
821 	if (batadv_has_timed_out(orig_node->last_seen,
822 				 2 * BATADV_PURGE_TIMEOUT)) {
823 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
824 			   "Originator timeout: originator %pM, last_seen %u\n",
825 			   orig_node->orig,
826 			   jiffies_to_msecs(orig_node->last_seen));
827 		return true;
828 	}
829 	changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
830 	changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
831 
832 	if (!changed)
833 		return false;
834 
	/* first for the default interface (BATADV_IF_DEFAULT, i.e. NULL) ... */
836 	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
837 						    BATADV_IF_DEFAULT);
838 	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
839 			    best_neigh_node);
840 	if (best_neigh_node)
841 		batadv_neigh_node_free_ref(best_neigh_node);
842 
843 	/* ... then for all other interfaces. */
844 	rcu_read_lock();
845 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
846 		if (hard_iface->if_status != BATADV_IF_ACTIVE)
847 			continue;
848 
849 		if (hard_iface->soft_iface != bat_priv->soft_iface)
850 			continue;
851 
852 		best_neigh_node = batadv_find_best_neighbor(bat_priv,
853 							    orig_node,
854 							    hard_iface);
855 		batadv_update_route(bat_priv, orig_node, hard_iface,
856 				    best_neigh_node);
857 		if (best_neigh_node)
858 			batadv_neigh_node_free_ref(best_neigh_node);
859 	}
860 	rcu_read_unlock();
861 
862 	return false;
863 }
864 
865 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
866 {
867 	struct batadv_hashtable *hash = bat_priv->orig_hash;
868 	struct hlist_node *node_tmp;
869 	struct hlist_head *head;
870 	spinlock_t *list_lock; /* spinlock to protect write access */
871 	struct batadv_orig_node *orig_node;
872 	uint32_t i;
873 
874 	if (!hash)
875 		return;
876 
877 	/* for all origins... */
878 	for (i = 0; i < hash->size; i++) {
879 		head = &hash->table[i];
880 		list_lock = &hash->list_locks[i];
881 
882 		spin_lock_bh(list_lock);
883 		hlist_for_each_entry_safe(orig_node, node_tmp,
884 					  head, hash_entry) {
885 			if (batadv_purge_orig_node(bat_priv, orig_node)) {
886 				batadv_gw_node_delete(bat_priv, orig_node);
887 				hlist_del_rcu(&orig_node->hash_entry);
888 				batadv_orig_node_free_ref(orig_node);
889 				continue;
890 			}
891 
892 			batadv_frag_purge_orig(orig_node,
893 					       batadv_frag_check_entry);
894 		}
895 		spin_unlock_bh(list_lock);
896 	}
897 
898 	batadv_gw_node_purge(bat_priv);
899 	batadv_gw_election(bat_priv);
900 }
901 
902 static void batadv_purge_orig(struct work_struct *work)
903 {
904 	struct delayed_work *delayed_work;
905 	struct batadv_priv *bat_priv;
906 
907 	delayed_work = container_of(work, struct delayed_work, work);
908 	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
909 	_batadv_purge_orig(bat_priv);
910 	queue_delayed_work(batadv_event_workqueue,
911 			   &bat_priv->orig_work,
912 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
913 }
914 
915 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
916 {
917 	_batadv_purge_orig(bat_priv);
918 }
919 
920 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
921 {
922 	struct net_device *net_dev = (struct net_device *)seq->private;
923 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
924 	struct batadv_hard_iface *primary_if;
925 
926 	primary_if = batadv_seq_print_text_primary_if_get(seq);
927 	if (!primary_if)
928 		return 0;
929 
930 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
931 		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
932 		   primary_if->net_dev->dev_addr, net_dev->name,
933 		   bat_priv->bat_algo_ops->name);
934 
935 	batadv_hardif_free_ref(primary_if);
936 
937 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
938 		seq_puts(seq,
939 			 "No printing function for this routing protocol\n");
940 		return 0;
941 	}
942 
943 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
944 					       BATADV_IF_DEFAULT);
945 
946 	return 0;
947 }
948 
949 /**
 * batadv_orig_hardif_seq_print_text - writes originator info for a specific
951  *  outgoing interface
952  * @seq: debugfs table seq_file struct
953  * @offset: not used
954  *
955  * Returns 0
956  */
957 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
958 {
959 	struct net_device *net_dev = (struct net_device *)seq->private;
960 	struct batadv_hard_iface *hard_iface;
961 	struct batadv_priv *bat_priv;
962 
963 	hard_iface = batadv_hardif_get_by_netdev(net_dev);
964 
965 	if (!hard_iface || !hard_iface->soft_iface) {
966 		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
967 		goto out;
968 	}
969 
970 	bat_priv = netdev_priv(hard_iface->soft_iface);
971 	if (!bat_priv->bat_algo_ops->bat_orig_print) {
972 		seq_puts(seq,
973 			 "No printing function for this routing protocol\n");
974 		goto out;
975 	}
976 
977 	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
978 		seq_puts(seq, "Interface not active\n");
979 		goto out;
980 	}
981 
982 	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
983 		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
984 		   hard_iface->net_dev->dev_addr,
985 		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
986 
987 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
988 
989 out:
990 	batadv_hardif_free_ref(hard_iface);
991 	return 0;
992 }
993 
994 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
995 			    int max_if_num)
996 {
997 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
998 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
999 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1000 	struct hlist_head *head;
1001 	struct batadv_orig_node *orig_node;
1002 	uint32_t i;
1003 	int ret;
1004 
1005 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1006 	 * if_num
1007 	 */
1008 	for (i = 0; i < hash->size; i++) {
1009 		head = &hash->table[i];
1010 
1011 		rcu_read_lock();
1012 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1013 			ret = 0;
1014 			if (bao->bat_orig_add_if)
1015 				ret = bao->bat_orig_add_if(orig_node,
1016 							   max_if_num);
1017 			if (ret == -ENOMEM)
1018 				goto err;
1019 		}
1020 		rcu_read_unlock();
1021 	}
1022 
1023 	return 0;
1024 
1025 err:
1026 	rcu_read_unlock();
1027 	return -ENOMEM;
1028 }
1029 
1030 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1031 			    int max_if_num)
1032 {
1033 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1034 	struct batadv_hashtable *hash = bat_priv->orig_hash;
1035 	struct hlist_head *head;
1036 	struct batadv_hard_iface *hard_iface_tmp;
1037 	struct batadv_orig_node *orig_node;
1038 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1039 	uint32_t i;
1040 	int ret;
1041 
1042 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1043 	 * if_num
1044 	 */
1045 	for (i = 0; i < hash->size; i++) {
1046 		head = &hash->table[i];
1047 
1048 		rcu_read_lock();
1049 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1050 			ret = 0;
1051 			if (bao->bat_orig_del_if)
1052 				ret = bao->bat_orig_del_if(orig_node,
1053 							   max_if_num,
1054 							   hard_iface->if_num);
1055 			if (ret == -ENOMEM)
1056 				goto err;
1057 		}
1058 		rcu_read_unlock();
1059 	}
1060 
1061 	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1062 	rcu_read_lock();
1063 	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1064 		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1065 			continue;
1066 
1067 		if (hard_iface == hard_iface_tmp)
1068 			continue;
1069 
1070 		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1071 			continue;
1072 
1073 		if (hard_iface_tmp->if_num > hard_iface->if_num)
1074 			hard_iface_tmp->if_num--;
1075 	}
1076 	rcu_read_unlock();
1077 
1078 	hard_iface->if_num = -1;
1079 	return 0;
1080 
1081 err:
1082 	rcu_read_unlock();
1083 	return -ENOMEM;
1084 }
1085