1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2011-2018  B.A.T.M.A.N. contributors:
3  *
4  * Antonio Quartulli
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "distributed-arp-table.h"
20 #include "main.h"
21 
22 #include <linux/atomic.h>
23 #include <linux/bitops.h>
24 #include <linux/byteorder/generic.h>
25 #include <linux/errno.h>
26 #include <linux/etherdevice.h>
27 #include <linux/gfp.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/in.h>
32 #include <linux/jiffies.h>
33 #include <linux/kernel.h>
34 #include <linux/kref.h>
35 #include <linux/list.h>
36 #include <linux/netlink.h>
37 #include <linux/rculist.h>
38 #include <linux/rcupdate.h>
39 #include <linux/seq_file.h>
40 #include <linux/skbuff.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/stddef.h>
44 #include <linux/string.h>
45 #include <linux/workqueue.h>
46 #include <net/arp.h>
47 #include <net/genetlink.h>
48 #include <net/netlink.h>
49 #include <net/sock.h>
50 #include <uapi/linux/batman_adv.h>
51 
52 #include "bridge_loop_avoidance.h"
53 #include "hard-interface.h"
54 #include "hash.h"
55 #include "log.h"
56 #include "netlink.h"
57 #include "originator.h"
58 #include "send.h"
59 #include "soft-interface.h"
60 #include "translation-table.h"
61 #include "tvlv.h"
62 
63 static void batadv_dat_purge(struct work_struct *work);
64 
65 /**
66  * batadv_dat_start_timer() - initialise the DAT periodic worker
67  * @bat_priv: the bat priv with all the soft interface information
68  */
69 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
70 {
71 	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
72 	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
73 			   msecs_to_jiffies(10000));
74 }
75 
76 /**
77  * batadv_dat_entry_release() - release dat_entry from lists and queue for free
78  *  after rcu grace period
79  * @ref: kref pointer of the dat_entry
80  */
81 static void batadv_dat_entry_release(struct kref *ref)
82 {
83 	struct batadv_dat_entry *dat_entry;
84 
85 	dat_entry = container_of(ref, struct batadv_dat_entry, refcount);
86 
87 	kfree_rcu(dat_entry, rcu);
88 }
89 
90 /**
91  * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
92  *  release it
93  * @dat_entry: dat_entry to be free'd
94  */
95 static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
96 {
97 	kref_put(&dat_entry->refcount, batadv_dat_entry_release);
98 }
99 
100 /**
101  * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
102  * @dat_entry: the entry to check
103  *
104  * Return: true if the entry has to be purged now, false otherwise.
105  */
106 static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
107 {
108 	return batadv_has_timed_out(dat_entry->last_update,
109 				    BATADV_DAT_ENTRY_TIMEOUT);
110 }
111 
112 /**
113  * __batadv_dat_purge() - delete entries from the DAT local storage
114  * @bat_priv: the bat priv with all the soft interface information
 * @to_purge: function in charge of deciding whether an entry has to be purged
 *	      or not. This function takes the dat_entry as argument and has to
 *	      return a boolean value: true if the entry has to be deleted,
 *	      false otherwise
119  *
120  * Loops over each entry in the DAT local storage and deletes it if and only if
121  * the to_purge function passed as argument returns true.
122  */
123 static void __batadv_dat_purge(struct batadv_priv *bat_priv,
124 			       bool (*to_purge)(struct batadv_dat_entry *))
125 {
126 	spinlock_t *list_lock; /* protects write access to the hash lists */
127 	struct batadv_dat_entry *dat_entry;
128 	struct hlist_node *node_tmp;
129 	struct hlist_head *head;
130 	u32 i;
131 
132 	if (!bat_priv->dat.hash)
133 		return;
134 
135 	for (i = 0; i < bat_priv->dat.hash->size; i++) {
136 		head = &bat_priv->dat.hash->table[i];
137 		list_lock = &bat_priv->dat.hash->list_locks[i];
138 
139 		spin_lock_bh(list_lock);
140 		hlist_for_each_entry_safe(dat_entry, node_tmp, head,
141 					  hash_entry) {
142 			/* if a helper function has been passed as parameter,
143 			 * ask it if the entry has to be purged or not
144 			 */
145 			if (to_purge && !to_purge(dat_entry))
146 				continue;
147 
148 			hlist_del_rcu(&dat_entry->hash_entry);
149 			batadv_dat_entry_put(dat_entry);
150 		}
151 		spin_unlock_bh(list_lock);
152 	}
153 }
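
/* Usage note: both purge modes appear in this file,
 *
 *	__batadv_dat_purge(bat_priv, batadv_dat_to_purge);
 *	__batadv_dat_purge(bat_priv, NULL);
 *
 * the first one (run by the periodic worker) only drops timed-out entries,
 * while a NULL to_purge callback removes every entry unconditionally, which
 * is what batadv_dat_hash_free() relies on during shutdown.
 */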
154 
155 /**
156  * batadv_dat_purge() - periodic task that deletes old entries from the local
157  *  DAT hash table
158  * @work: kernel work struct
159  */
160 static void batadv_dat_purge(struct work_struct *work)
161 {
162 	struct delayed_work *delayed_work;
163 	struct batadv_priv_dat *priv_dat;
164 	struct batadv_priv *bat_priv;
165 
166 	delayed_work = to_delayed_work(work);
167 	priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
168 	bat_priv = container_of(priv_dat, struct batadv_priv, dat);
169 
170 	__batadv_dat_purge(bat_priv, batadv_dat_to_purge);
171 	batadv_dat_start_timer(bat_priv);
172 }
173 
174 /**
 * batadv_compare_dat() - comparison function used in the local DAT hash table
176  * @node: node in the local table
177  * @data2: second object to compare the node to
178  *
179  * Return: true if the two entries are the same, false otherwise.
180  */
181 static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
182 {
183 	const void *data1 = container_of(node, struct batadv_dat_entry,
184 					 hash_entry);
185 
186 	return memcmp(data1, data2, sizeof(__be32)) == 0;
187 }
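
/* Note: only the first sizeof(__be32) bytes are compared, i.e. the check
 * relies on ip being the first member of struct batadv_dat_entry. The VLAN
 * identifier is not part of this comparison; it only influences the bucket
 * selection performed by batadv_hash_dat().
 */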
188 
189 /**
190  * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
191  * @skb: ARP packet
192  * @hdr_size: size of the possible header before the ARP packet
193  *
194  * Return: the value of the hw_src field in the ARP packet.
195  */
196 static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
197 {
198 	u8 *addr;
199 
200 	addr = (u8 *)(skb->data + hdr_size);
201 	addr += ETH_HLEN + sizeof(struct arphdr);
202 
203 	return addr;
204 }
205 
206 /**
207  * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
208  * @skb: ARP packet
209  * @hdr_size: size of the possible header before the ARP packet
210  *
211  * Return: the value of the ip_src field in the ARP packet.
212  */
213 static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
214 {
215 	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
216 }
217 
218 /**
219  * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
220  * @skb: ARP packet
221  * @hdr_size: size of the possible header before the ARP packet
222  *
223  * Return: the value of the hw_dst field in the ARP packet.
224  */
225 static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
226 {
227 	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
228 }
229 
230 /**
231  * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
232  * @skb: ARP packet
233  * @hdr_size: size of the possible header before the ARP packet
234  *
235  * Return: the value of the ip_dst field in the ARP packet.
236  */
237 static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
238 {
239 	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
240 }
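
/* ARP payload layout assumed by the accessors above (Ethernet/IPv4 ARP):
 * after the encapsulating batman-adv header (hdr_size) and the Ethernet
 * header (ETH_HLEN) comes a fixed-size struct arphdr, followed by
 * hw_src (ETH_ALEN), ip_src (4), hw_dst (ETH_ALEN) and ip_dst (4) bytes.
 * These are exactly the offsets hard-coded in batadv_arp_hw_src() and the
 * helpers built on top of it; batadv_arp_get_type() validates ar_hln/ar_pln
 * before any of them is used on a packet.
 */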
241 
242 /**
 * batadv_hash_dat() - compute the hash value for an IP and VLAN id pair
244  * @data: data to hash
245  * @size: size of the hash table
246  *
247  * Return: the selected index in the hash table for the given data.
248  */
249 static u32 batadv_hash_dat(const void *data, u32 size)
250 {
251 	u32 hash = 0;
252 	const struct batadv_dat_entry *dat = data;
253 	const unsigned char *key;
254 	u32 i;
255 
256 	key = (const unsigned char *)&dat->ip;
257 	for (i = 0; i < sizeof(dat->ip); i++) {
258 		hash += key[i];
259 		hash += (hash << 10);
260 		hash ^= (hash >> 6);
261 	}
262 
263 	key = (const unsigned char *)&dat->vid;
264 	for (i = 0; i < sizeof(dat->vid); i++) {
265 		hash += key[i];
266 		hash += (hash << 10);
267 		hash ^= (hash >> 6);
268 	}
269 
270 	hash += (hash << 3);
271 	hash ^= (hash >> 11);
272 	hash += (hash << 15);
273 
274 	return hash % size;
275 }
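
/* Note: this is Bob Jenkins' one-at-a-time hash, fed first with the bytes of
 * dat->ip and then with the bytes of dat->vid, followed by the usual final
 * avalanche steps and a modulo reduction to the table size. Besides choosing
 * hash buckets, the same function defines the DHT identifier space:
 * batadv_dat_select_candidates() calls it with size BATADV_DAT_ADDR_MAX to
 * map an IP/VID pair onto the DHT ring.
 */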
276 
277 /**
 * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
 *  table
280  * @bat_priv: the bat priv with all the soft interface information
281  * @ip: search key
282  * @vid: VLAN identifier
283  *
284  * Return: the dat_entry if found, NULL otherwise.
285  */
286 static struct batadv_dat_entry *
287 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
288 			   unsigned short vid)
289 {
290 	struct hlist_head *head;
291 	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
292 	struct batadv_hashtable *hash = bat_priv->dat.hash;
293 	u32 index;
294 
295 	if (!hash)
296 		return NULL;
297 
298 	to_find.ip = ip;
299 	to_find.vid = vid;
300 
301 	index = batadv_hash_dat(&to_find, hash->size);
302 	head = &hash->table[index];
303 
304 	rcu_read_lock();
305 	hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
306 		if (dat_entry->ip != ip)
307 			continue;
308 
309 		if (!kref_get_unless_zero(&dat_entry->refcount))
310 			continue;
311 
312 		dat_entry_tmp = dat_entry;
313 		break;
314 	}
315 	rcu_read_unlock();
316 
317 	return dat_entry_tmp;
318 }
319 
320 /**
 * batadv_dat_entry_add() - add a new dat entry or update it if it already
 *  exists
322  * @bat_priv: the bat priv with all the soft interface information
323  * @ip: ipv4 to add/edit
324  * @mac_addr: mac address to assign to the given ipv4
325  * @vid: VLAN identifier
326  */
327 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
328 				 u8 *mac_addr, unsigned short vid)
329 {
330 	struct batadv_dat_entry *dat_entry;
331 	int hash_added;
332 
333 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
334 	/* if this entry is already known, just update it */
335 	if (dat_entry) {
336 		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
337 			ether_addr_copy(dat_entry->mac_addr, mac_addr);
338 		dat_entry->last_update = jiffies;
339 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
340 			   "Entry updated: %pI4 %pM (vid: %d)\n",
341 			   &dat_entry->ip, dat_entry->mac_addr,
342 			   batadv_print_vid(vid));
343 		goto out;
344 	}
345 
346 	dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
347 	if (!dat_entry)
348 		goto out;
349 
350 	dat_entry->ip = ip;
351 	dat_entry->vid = vid;
352 	ether_addr_copy(dat_entry->mac_addr, mac_addr);
353 	dat_entry->last_update = jiffies;
354 	kref_init(&dat_entry->refcount);
355 
356 	kref_get(&dat_entry->refcount);
357 	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
358 				     batadv_hash_dat, dat_entry,
359 				     &dat_entry->hash_entry);
360 
361 	if (unlikely(hash_added != 0)) {
362 		/* remove the reference for the hash */
363 		batadv_dat_entry_put(dat_entry);
364 		goto out;
365 	}
366 
367 	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
368 		   &dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(vid));
369 
370 out:
371 	if (dat_entry)
372 		batadv_dat_entry_put(dat_entry);
373 }
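
/* Reference counting note for batadv_dat_entry_add(): kref_init() creates the
 * reference owned by this function, the extra kref_get() before
 * batadv_hash_add() is the reference owned by the hash table, and the
 * batadv_dat_entry_put() at the out label drops this function's reference,
 * both for a freshly allocated entry and for an existing one returned by the
 * lookup. If batadv_hash_add() fails, the hash reference is dropped
 * explicitly and the final put then frees the entry.
 */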
374 
375 #ifdef CONFIG_BATMAN_ADV_DEBUG
376 
377 /**
378  * batadv_dbg_arp() - print a debug message containing all the ARP packet
379  *  details
380  * @bat_priv: the bat priv with all the soft interface information
381  * @skb: ARP packet
382  * @hdr_size: size of the possible header before the ARP packet
383  * @msg: message to print together with the debugging information
384  */
385 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
386 			   int hdr_size, char *msg)
387 {
388 	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
389 	struct batadv_bcast_packet *bcast_pkt;
390 	u8 *orig_addr;
391 	__be32 ip_src, ip_dst;
392 
393 	if (msg)
394 		batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
395 
396 	ip_src = batadv_arp_ip_src(skb, hdr_size);
397 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
398 	batadv_dbg(BATADV_DBG_DAT, bat_priv,
399 		   "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
400 		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
401 		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
402 
403 	if (hdr_size < sizeof(struct batadv_unicast_packet))
404 		return;
405 
406 	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
407 
408 	switch (unicast_4addr_packet->u.packet_type) {
409 	case BATADV_UNICAST:
410 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
411 			   "* encapsulated within a UNICAST packet\n");
412 		break;
413 	case BATADV_UNICAST_4ADDR:
414 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
415 			   "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
416 			   unicast_4addr_packet->src);
417 		switch (unicast_4addr_packet->subtype) {
418 		case BATADV_P_DAT_DHT_PUT:
419 			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
420 			break;
421 		case BATADV_P_DAT_DHT_GET:
422 			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
423 			break;
424 		case BATADV_P_DAT_CACHE_REPLY:
425 			batadv_dbg(BATADV_DBG_DAT, bat_priv,
426 				   "* type: DAT_CACHE_REPLY\n");
427 			break;
428 		case BATADV_P_DATA:
429 			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
430 			break;
431 		default:
			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
				   unicast_4addr_packet->subtype);
434 		}
435 		break;
436 	case BATADV_BCAST:
437 		bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
438 		orig_addr = bcast_pkt->orig;
439 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
440 			   "* encapsulated within a BCAST packet (src: %pM)\n",
441 			   orig_addr);
442 		break;
443 	default:
444 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
445 			   "* encapsulated within an unknown packet type (0x%x)\n",
446 			   unicast_4addr_packet->u.packet_type);
447 	}
448 }
449 
450 #else
451 
452 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
453 			   int hdr_size, char *msg)
454 {
455 }
456 
457 #endif /* CONFIG_BATMAN_ADV_DEBUG */
458 
459 /**
460  * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
461  * @res: the array with the already selected candidates
462  * @select: number of already selected candidates
463  * @tmp_max: address of the currently evaluated node
464  * @max: current round max address
465  * @last_max: address of the last selected candidate
466  * @candidate: orig_node under evaluation
467  * @max_orig_node: last selected candidate
468  *
469  * Return: true if the node has been elected as next candidate or false
470  * otherwise.
471  */
472 static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
473 					 int select, batadv_dat_addr_t tmp_max,
474 					 batadv_dat_addr_t max,
475 					 batadv_dat_addr_t last_max,
476 					 struct batadv_orig_node *candidate,
477 					 struct batadv_orig_node *max_orig_node)
478 {
479 	bool ret = false;
480 	int j;
481 
482 	/* check if orig node candidate is running DAT */
483 	if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
484 		goto out;
485 
486 	/* Check if this node has already been selected... */
487 	for (j = 0; j < select; j++)
488 		if (res[j].orig_node == candidate)
489 			break;
490 	/* ..and possibly skip it */
491 	if (j < select)
492 		goto out;
493 	/* sanity check: has it already been selected? This should not happen */
494 	if (tmp_max > last_max)
495 		goto out;
496 	/* check if during this iteration an originator with a closer dht
497 	 * address has already been found
498 	 */
499 	if (tmp_max < max)
500 		goto out;
	/* this is a hash collision with the temporarily selected node. Choose
	 * the one with the lowest address
503 	 */
504 	if (tmp_max == max && max_orig_node &&
505 	    batadv_compare_eth(candidate->orig, max_orig_node->orig))
506 		goto out;
507 
508 	ret = true;
509 out:
510 	return ret;
511 }
512 
513 /**
514  * batadv_choose_next_candidate() - select the next DHT candidate
515  * @bat_priv: the bat priv with all the soft interface information
516  * @cands: candidates array
517  * @select: number of candidates already present in the array
518  * @ip_key: key to look up in the DHT
519  * @last_max: pointer where the address of the selected candidate will be saved
520  */
521 static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
522 					 struct batadv_dat_candidate *cands,
523 					 int select, batadv_dat_addr_t ip_key,
524 					 batadv_dat_addr_t *last_max)
525 {
526 	batadv_dat_addr_t max = 0;
527 	batadv_dat_addr_t tmp_max = 0;
528 	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
529 	struct batadv_hashtable *hash = bat_priv->orig_hash;
530 	struct hlist_head *head;
531 	int i;
532 
533 	/* if no node is eligible as candidate, leave the candidate type as
534 	 * NOT_FOUND
535 	 */
536 	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;
537 
538 	/* iterate over the originator list and find the node with the closest
539 	 * dat_address which has not been selected yet
540 	 */
541 	for (i = 0; i < hash->size; i++) {
542 		head = &hash->table[i];
543 
544 		rcu_read_lock();
545 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
546 			/* the dht space is a ring using unsigned addresses */
547 			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
548 				  ip_key;
549 
550 			if (!batadv_is_orig_node_eligible(cands, select,
551 							  tmp_max, max,
552 							  *last_max, orig_node,
553 							  max_orig_node))
554 				continue;
555 
556 			if (!kref_get_unless_zero(&orig_node->refcount))
557 				continue;
558 
559 			max = tmp_max;
560 			if (max_orig_node)
561 				batadv_orig_node_put(max_orig_node);
562 			max_orig_node = orig_node;
563 		}
564 		rcu_read_unlock();
565 	}
566 	if (max_orig_node) {
567 		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
568 		cands[select].orig_node = max_orig_node;
569 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
570 			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
571 			   select, max_orig_node->orig, max_orig_node->dat_addr,
572 			   max);
573 	}
574 	*last_max = max;
575 }
576 
577 /**
578  * batadv_dat_select_candidates() - select the nodes which the DHT message has
579  *  to be sent to
580  * @bat_priv: the bat priv with all the soft interface information
581  * @ip_dst: ipv4 to look up in the DHT
582  * @vid: VLAN identifier
583  *
 * An originator O is selected if and only if its DHT_ID value is one of the
 * three closest values (from the LEFT, with wrap around if needed) to the
 * hash value of the key. ip_dst is the key.
587  *
 * Return: the candidate array of size BATADV_DAT_CANDIDATES_NUM, or NULL in
 *  case of failure.
589  */
590 static struct batadv_dat_candidate *
591 batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
592 			     unsigned short vid)
593 {
594 	int select;
595 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
596 	struct batadv_dat_candidate *res;
597 	struct batadv_dat_entry dat;
598 
599 	if (!bat_priv->orig_hash)
600 		return NULL;
601 
602 	res = kmalloc_array(BATADV_DAT_CANDIDATES_NUM, sizeof(*res),
603 			    GFP_ATOMIC);
604 	if (!res)
605 		return NULL;
606 
607 	dat.ip = ip_dst;
608 	dat.vid = vid;
609 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
610 						    BATADV_DAT_ADDR_MAX);
611 
612 	batadv_dbg(BATADV_DBG_DAT, bat_priv,
613 		   "%s(): IP=%pI4 hash(IP)=%u\n", __func__, &ip_dst,
614 		   ip_key);
615 
616 	for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
617 		batadv_choose_next_candidate(bat_priv, res, select, ip_key,
618 					     &last_max);
619 
620 	return res;
621 }
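
/* Selection sketch: the lookup key is first mapped onto the DHT address space
 * (ip_key). Each of the BATADV_DAT_CANDIDATES_NUM rounds then walks the
 * originator table and keeps the DAT-capable, not yet selected originator
 * that maximises the unsigned ring metric
 *
 *	tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + ip_key
 *
 * i.e. the next closest node to the key on the ring. last_max carries the
 * winning metric from one round into the next, so a later round can only pick
 * a node whose metric does not exceed the previous winner's.
 */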
622 
623 /**
624  * batadv_dat_send_data() - send a payload to the selected candidates
625  * @bat_priv: the bat priv with all the soft interface information
626  * @skb: payload to send
627  * @ip: the DHT key
628  * @vid: VLAN identifier
629  * @packet_subtype: unicast4addr packet subtype to use
630  *
 * This function copies the skb with pskb_copy_for_clone() and sends each copy
 * as a unicast packet to one of the selected candidates.
633  *
634  * Return: true if the packet is sent to at least one candidate, false
635  * otherwise.
636  */
637 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
638 				 struct sk_buff *skb, __be32 ip,
639 				 unsigned short vid, int packet_subtype)
640 {
641 	int i;
642 	bool ret = false;
643 	int send_status;
644 	struct batadv_neigh_node *neigh_node = NULL;
645 	struct sk_buff *tmp_skb;
646 	struct batadv_dat_candidate *cand;
647 
648 	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
649 	if (!cand)
650 		goto out;
651 
652 	batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
653 
654 	for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
655 		if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
656 			continue;
657 
658 		neigh_node = batadv_orig_router_get(cand[i].orig_node,
659 						    BATADV_IF_DEFAULT);
660 		if (!neigh_node)
661 			goto free_orig;
662 
663 		tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
664 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
665 							   cand[i].orig_node,
666 							   packet_subtype)) {
667 			kfree_skb(tmp_skb);
668 			goto free_neigh;
669 		}
670 
671 		send_status = batadv_send_unicast_skb(tmp_skb, neigh_node);
672 		if (send_status == NET_XMIT_SUCCESS) {
673 			/* count the sent packet */
674 			switch (packet_subtype) {
675 			case BATADV_P_DAT_DHT_GET:
676 				batadv_inc_counter(bat_priv,
677 						   BATADV_CNT_DAT_GET_TX);
678 				break;
679 			case BATADV_P_DAT_DHT_PUT:
680 				batadv_inc_counter(bat_priv,
681 						   BATADV_CNT_DAT_PUT_TX);
682 				break;
683 			}
684 
685 			/* packet sent to a candidate: return true */
686 			ret = true;
687 		}
688 free_neigh:
689 		batadv_neigh_node_put(neigh_node);
690 free_orig:
691 		batadv_orig_node_put(cand[i].orig_node);
692 	}
693 
694 out:
695 	kfree(cand);
696 	return ret;
697 }
698 
699 /**
700  * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
701  *  setting change
702  * @bat_priv: the bat priv with all the soft interface information
703  */
704 static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
705 {
706 	char dat_mode;
707 
708 	dat_mode = atomic_read(&bat_priv->distributed_arp_table);
709 
710 	switch (dat_mode) {
711 	case 0:
712 		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
713 		break;
714 	case 1:
715 		batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
716 					       NULL, 0);
717 		break;
718 	}
719 }
720 
721 /**
722  * batadv_dat_status_update() - update the dat tvlv container after dat
723  *  setting change
724  * @net_dev: the soft interface net device
725  */
726 void batadv_dat_status_update(struct net_device *net_dev)
727 {
728 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
729 
730 	batadv_dat_tvlv_container_update(bat_priv);
731 }
732 
733 /**
734  * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
735  * @bat_priv: the bat priv with all the soft interface information
736  * @orig: the orig_node of the ogm
737  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the DAT data
739  * @tvlv_value_len: tvlv buffer length
740  */
741 static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
742 					   struct batadv_orig_node *orig,
743 					   u8 flags,
744 					   void *tvlv_value, u16 tvlv_value_len)
745 {
746 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
747 		clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
748 	else
749 		set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
750 }
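
/* Capability signalling note: the DAT TVLV container carries no payload, its
 * mere presence in an OGM announces that the originator runs DAT. Because the
 * handler above is registered with BATADV_TVLV_HANDLER_OGM_CIFNOTFND, it is
 * also invoked when the container is missing, which is when the
 * BATADV_ORIG_CAPA_HAS_DAT bit gets cleared again.
 */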
751 
752 /**
753  * batadv_dat_hash_free() - free the local DAT hash table
754  * @bat_priv: the bat priv with all the soft interface information
755  */
756 static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
757 {
758 	if (!bat_priv->dat.hash)
759 		return;
760 
761 	__batadv_dat_purge(bat_priv, NULL);
762 
763 	batadv_hash_destroy(bat_priv->dat.hash);
764 
765 	bat_priv->dat.hash = NULL;
766 }
767 
768 /**
769  * batadv_dat_init() - initialise the DAT internals
770  * @bat_priv: the bat priv with all the soft interface information
771  *
772  * Return: 0 in case of success, a negative error code otherwise
773  */
774 int batadv_dat_init(struct batadv_priv *bat_priv)
775 {
776 	if (bat_priv->dat.hash)
777 		return 0;
778 
779 	bat_priv->dat.hash = batadv_hash_new(1024);
780 
781 	if (!bat_priv->dat.hash)
782 		return -ENOMEM;
783 
784 	batadv_dat_start_timer(bat_priv);
785 
786 	batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
787 				     NULL, BATADV_TVLV_DAT, 1,
788 				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
789 	batadv_dat_tvlv_container_update(bat_priv);
790 	return 0;
791 }
792 
793 /**
794  * batadv_dat_free() - free the DAT internals
795  * @bat_priv: the bat priv with all the soft interface information
796  */
797 void batadv_dat_free(struct batadv_priv *bat_priv)
798 {
799 	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
800 	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
801 
802 	cancel_delayed_work_sync(&bat_priv->dat.work);
803 
804 	batadv_dat_hash_free(bat_priv);
805 }
806 
807 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
808 /**
809  * batadv_dat_cache_seq_print_text() - print the local DAT hash table
810  * @seq: seq file to print on
811  * @offset: not used
812  *
813  * Return: always 0
814  */
815 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
816 {
817 	struct net_device *net_dev = (struct net_device *)seq->private;
818 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
819 	struct batadv_hashtable *hash = bat_priv->dat.hash;
820 	struct batadv_dat_entry *dat_entry;
821 	struct batadv_hard_iface *primary_if;
822 	struct hlist_head *head;
823 	unsigned long last_seen_jiffies;
824 	int last_seen_msecs, last_seen_secs, last_seen_mins;
825 	u32 i;
826 
827 	primary_if = batadv_seq_print_text_primary_if_get(seq);
828 	if (!primary_if)
829 		goto out;
830 
831 	seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
832 	seq_puts(seq,
833 		 "          IPv4             MAC        VID   last-seen\n");
834 
835 	for (i = 0; i < hash->size; i++) {
836 		head = &hash->table[i];
837 
838 		rcu_read_lock();
839 		hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
840 			last_seen_jiffies = jiffies - dat_entry->last_update;
841 			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
842 			last_seen_mins = last_seen_msecs / 60000;
843 			last_seen_msecs = last_seen_msecs % 60000;
844 			last_seen_secs = last_seen_msecs / 1000;
845 
846 			seq_printf(seq, " * %15pI4 %pM %4i %6i:%02i\n",
847 				   &dat_entry->ip, dat_entry->mac_addr,
848 				   batadv_print_vid(dat_entry->vid),
849 				   last_seen_mins, last_seen_secs);
850 		}
851 		rcu_read_unlock();
852 	}
853 
854 out:
855 	if (primary_if)
856 		batadv_hardif_put(primary_if);
857 	return 0;
858 }
859 #endif
860 
861 /**
862  * batadv_dat_cache_dump_entry() - dump one entry of the DAT cache table to a
863  *  netlink socket
864  * @msg: buffer for the message
865  * @portid: netlink port
866  * @cb: Control block containing additional options
867  * @dat_entry: entry to dump
868  *
869  * Return: 0 or error code.
870  */
871 static int
872 batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid,
873 			    struct netlink_callback *cb,
874 			    struct batadv_dat_entry *dat_entry)
875 {
876 	int msecs;
877 	void *hdr;
878 
879 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
880 			  &batadv_netlink_family, NLM_F_MULTI,
881 			  BATADV_CMD_GET_DAT_CACHE);
882 	if (!hdr)
883 		return -ENOBUFS;
884 
885 	genl_dump_check_consistent(cb, hdr);
886 
887 	msecs = jiffies_to_msecs(jiffies - dat_entry->last_update);
888 
889 	if (nla_put_in_addr(msg, BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
890 			    dat_entry->ip) ||
891 	    nla_put(msg, BATADV_ATTR_DAT_CACHE_HWADDRESS, ETH_ALEN,
892 		    dat_entry->mac_addr) ||
893 	    nla_put_u16(msg, BATADV_ATTR_DAT_CACHE_VID, dat_entry->vid) ||
894 	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
895 		genlmsg_cancel(msg, hdr);
896 		return -EMSGSIZE;
897 	}
898 
899 	genlmsg_end(msg, hdr);
900 	return 0;
901 }
902 
903 /**
904  * batadv_dat_cache_dump_bucket() - dump one bucket of the DAT cache table to
905  *  a netlink socket
906  * @msg: buffer for the message
907  * @portid: netlink port
908  * @cb: Control block containing additional options
909  * @hash: hash to dump
910  * @bucket: bucket index to dump
911  * @idx_skip: How many entries to skip
912  *
913  * Return: 0 or error code.
914  */
915 static int
916 batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid,
917 			     struct netlink_callback *cb,
918 			     struct batadv_hashtable *hash, unsigned int bucket,
919 			     int *idx_skip)
920 {
921 	struct batadv_dat_entry *dat_entry;
922 	int idx = 0;
923 
924 	spin_lock_bh(&hash->list_locks[bucket]);
925 	cb->seq = atomic_read(&hash->generation) << 1 | 1;
926 
927 	hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
928 		if (idx < *idx_skip)
929 			goto skip;
930 
931 		if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
932 			spin_unlock_bh(&hash->list_locks[bucket]);
933 			*idx_skip = idx;
934 
935 			return -EMSGSIZE;
936 		}
937 
938 skip:
939 		idx++;
940 	}
941 	spin_unlock_bh(&hash->list_locks[bucket]);
942 
943 	return 0;
944 }
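
/* Dump resumption note: batadv_dat_cache_dump() keeps the current bucket and
 * the per-bucket entry index in cb->args[]. When a bucket cannot be dumped
 * completely (-EMSGSIZE from batadv_dat_cache_dump_entry()), *idx_skip
 * records how many entries were already emitted so the next dump pass resumes
 * right after them. cb->seq is derived from the hash generation counter,
 * allowing genl_dump_check_consistent() to flag interrupted dumps to
 * userspace.
 */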
945 
946 /**
947  * batadv_dat_cache_dump() - dump DAT cache table to a netlink socket
948  * @msg: buffer for the message
949  * @cb: callback structure containing arguments
950  *
951  * Return: message length.
952  */
953 int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
954 {
955 	struct batadv_hard_iface *primary_if = NULL;
956 	int portid = NETLINK_CB(cb->skb).portid;
957 	struct net *net = sock_net(cb->skb->sk);
958 	struct net_device *soft_iface;
959 	struct batadv_hashtable *hash;
960 	struct batadv_priv *bat_priv;
961 	int bucket = cb->args[0];
962 	int idx = cb->args[1];
963 	int ifindex;
964 	int ret = 0;
965 
966 	ifindex = batadv_netlink_get_ifindex(cb->nlh,
967 					     BATADV_ATTR_MESH_IFINDEX);
968 	if (!ifindex)
969 		return -EINVAL;
970 
971 	soft_iface = dev_get_by_index(net, ifindex);
972 	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
973 		ret = -ENODEV;
974 		goto out;
975 	}
976 
977 	bat_priv = netdev_priv(soft_iface);
978 	hash = bat_priv->dat.hash;
979 
980 	primary_if = batadv_primary_if_get_selected(bat_priv);
981 	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
982 		ret = -ENOENT;
983 		goto out;
984 	}
985 
986 	while (bucket < hash->size) {
987 		if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
988 						 &idx))
989 			break;
990 
991 		bucket++;
992 		idx = 0;
993 	}
994 
995 	cb->args[0] = bucket;
996 	cb->args[1] = idx;
997 
998 	ret = msg->len;
999 
1000 out:
1001 	if (primary_if)
1002 		batadv_hardif_put(primary_if);
1003 
1004 	if (soft_iface)
1005 		dev_put(soft_iface);
1006 
1007 	return ret;
1008 }
1009 
1010 /**
 * batadv_arp_get_type() - parse an ARP packet and get its type
1012  * @bat_priv: the bat priv with all the soft interface information
1013  * @skb: packet to analyse
1014  * @hdr_size: size of the possible header before the ARP packet in the skb
1015  *
1016  * Return: the ARP type if the skb contains a valid ARP packet, 0 otherwise.
1017  */
1018 static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
1019 			       struct sk_buff *skb, int hdr_size)
1020 {
1021 	struct arphdr *arphdr;
1022 	struct ethhdr *ethhdr;
1023 	__be32 ip_src, ip_dst;
1024 	u8 *hw_src, *hw_dst;
1025 	u16 type = 0;
1026 
1027 	/* pull the ethernet header */
1028 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
1029 		goto out;
1030 
1031 	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
1032 
1033 	if (ethhdr->h_proto != htons(ETH_P_ARP))
1034 		goto out;
1035 
1036 	/* pull the ARP payload */
1037 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
1038 				    arp_hdr_len(skb->dev))))
1039 		goto out;
1040 
1041 	arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
1042 
	/* check whether the ARP packet carries valid IP information */
1044 	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1045 		goto out;
1046 
1047 	if (arphdr->ar_pro != htons(ETH_P_IP))
1048 		goto out;
1049 
1050 	if (arphdr->ar_hln != ETH_ALEN)
1051 		goto out;
1052 
1053 	if (arphdr->ar_pln != 4)
1054 		goto out;
1055 
1056 	/* Check for bad reply/request. If the ARP message is not sane, DAT
1057 	 * will simply ignore it
1058 	 */
1059 	ip_src = batadv_arp_ip_src(skb, hdr_size);
1060 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1061 	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
1062 	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
1063 	    ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
1064 	    ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
1065 		goto out;
1066 
1067 	hw_src = batadv_arp_hw_src(skb, hdr_size);
1068 	if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
1069 		goto out;
1070 
1071 	/* don't care about the destination MAC address in ARP requests */
1072 	if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
1073 		hw_dst = batadv_arp_hw_dst(skb, hdr_size);
1074 		if (is_zero_ether_addr(hw_dst) ||
1075 		    is_multicast_ether_addr(hw_dst))
1076 			goto out;
1077 	}
1078 
1079 	type = ntohs(arphdr->ar_op);
1080 out:
1081 	return type;
1082 }
1083 
1084 /**
1085  * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
1086  * @skb: the buffer containing the packet to extract the VID from
1087  * @hdr_size: the size of the batman-adv header encapsulating the packet
1088  *
1089  * Return: If the packet embedded in the skb is vlan tagged this function
1090  * returns the VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS
1091  * is returned.
1092  */
1093 static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
1094 {
1095 	unsigned short vid;
1096 
1097 	vid = batadv_get_vid(skb, *hdr_size);
1098 
	/* ARP parsing functions jump forward by hdr_size + ETH_HLEN.
	 * If the header contained in the packet is a VLAN one (which is
	 * longer), hdr_size is updated so that the functions still skip the
	 * correct amount of bytes.
1103 	 */
1104 	if (vid & BATADV_VLAN_HAS_TAG)
1105 		*hdr_size += VLAN_HLEN;
1106 
1107 	return vid;
1108 }
1109 
1110 /**
1111  * batadv_dat_arp_create_reply() - create an ARP Reply
1112  * @bat_priv: the bat priv with all the soft interface information
1113  * @ip_src: ARP sender IP
1114  * @ip_dst: ARP target IP
1115  * @hw_src: Ethernet source and ARP sender MAC
1116  * @hw_dst: Ethernet destination and ARP target MAC
1117  * @vid: VLAN identifier (optional, set to zero otherwise)
1118  *
1119  * Creates an ARP Reply from the given values, optionally encapsulated in a
1120  * VLAN header.
1121  *
1122  * Return: An skb containing an ARP Reply.
1123  */
1124 static struct sk_buff *
1125 batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
1126 			    __be32 ip_dst, u8 *hw_src, u8 *hw_dst,
1127 			    unsigned short vid)
1128 {
1129 	struct sk_buff *skb;
1130 
1131 	skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface,
1132 			 ip_src, hw_dst, hw_src, hw_dst);
1133 	if (!skb)
1134 		return NULL;
1135 
1136 	skb_reset_mac_header(skb);
1137 
1138 	if (vid & BATADV_VLAN_HAS_TAG)
1139 		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
1140 				      vid & VLAN_VID_MASK);
1141 
1142 	return skb;
1143 }
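
/* Argument mapping note for the arp_create() call above (per the in-kernel
 * arp_create() prototype): dest_ip = ip_dst, src_ip = ip_src,
 * dest_hw = hw_dst, src_hw = hw_src and target_hw = hw_dst. The callers
 * therefore pass the resolved address from the DAT cache as hw_src/ip_src and
 * the original requester's addresses as hw_dst/ip_dst to build the reply.
 */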
1144 
1145 /**
1146  * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
 *  answer using DAT
1148  * @bat_priv: the bat priv with all the soft interface information
1149  * @skb: packet to check
1150  *
1151  * Return: true if the message has been sent to the dht candidates, false
1152  * otherwise. In case of a positive return value the message has to be enqueued
1153  * to permit the fallback.
1154  */
1155 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
1156 					   struct sk_buff *skb)
1157 {
1158 	u16 type = 0;
1159 	__be32 ip_dst, ip_src;
1160 	u8 *hw_src;
1161 	bool ret = false;
1162 	struct batadv_dat_entry *dat_entry = NULL;
1163 	struct sk_buff *skb_new;
1164 	struct net_device *soft_iface = bat_priv->soft_iface;
1165 	int hdr_size = 0;
1166 	unsigned short vid;
1167 
1168 	if (!atomic_read(&bat_priv->distributed_arp_table))
1169 		goto out;
1170 
1171 	vid = batadv_dat_get_vid(skb, &hdr_size);
1172 
1173 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
1174 	/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
1175 	 * message to the selected DHT candidates
1176 	 */
1177 	if (type != ARPOP_REQUEST)
1178 		goto out;
1179 
1180 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REQUEST");
1181 
1182 	ip_src = batadv_arp_ip_src(skb, hdr_size);
1183 	hw_src = batadv_arp_hw_src(skb, hdr_size);
1184 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1185 
1186 	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
1187 
1188 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
1189 	if (dat_entry) {
1190 		/* If the ARP request is destined for a local client the local
1191 		 * client will answer itself. DAT would only generate a
1192 		 * duplicate packet.
1193 		 *
1194 		 * Moreover, if the soft-interface is enslaved into a bridge, an
1195 		 * additional DAT answer may trigger kernel warnings about
1196 		 * a packet coming from the wrong port.
1197 		 */
1198 		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
1199 			ret = true;
1200 			goto out;
1201 		}
1202 
1203 		/* If BLA is enabled, only send ARP replies if we have claimed
1204 		 * the destination for the ARP request or if no one else of
1205 		 * the backbone gws belonging to our backbone has claimed the
1206 		 * destination.
1207 		 */
1208 		if (!batadv_bla_check_claim(bat_priv,
1209 					    dat_entry->mac_addr, vid)) {
1210 			batadv_dbg(BATADV_DBG_DAT, bat_priv,
1211 				   "Device %pM claimed by another backbone gw. Don't send ARP reply!",
1212 				   dat_entry->mac_addr);
1213 			ret = true;
1214 			goto out;
1215 		}
1216 
1217 		skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
1218 						      dat_entry->mac_addr,
1219 						      hw_src, vid);
1220 		if (!skb_new)
1221 			goto out;
1222 
1223 		skb_new->protocol = eth_type_trans(skb_new, soft_iface);
1224 
1225 		batadv_inc_counter(bat_priv, BATADV_CNT_RX);
1226 		batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
1227 				   skb->len + ETH_HLEN + hdr_size);
1228 
1229 		netif_rx(skb_new);
1230 		batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
1231 		ret = true;
1232 	} else {
1233 		/* Send the request to the DHT */
1234 		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
1235 					   BATADV_P_DAT_DHT_GET);
1236 	}
1237 out:
1238 	if (dat_entry)
1239 		batadv_dat_entry_put(dat_entry);
1240 	return ret;
1241 }
1242 
1243 /**
1244  * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
 *  answer using the local DAT storage
1246  * @bat_priv: the bat priv with all the soft interface information
1247  * @skb: packet to check
1248  * @hdr_size: size of the encapsulation header
1249  *
1250  * Return: true if the request has been answered, false otherwise.
1251  */
1252 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
1253 					   struct sk_buff *skb, int hdr_size)
1254 {
1255 	u16 type;
1256 	__be32 ip_src, ip_dst;
1257 	u8 *hw_src;
1258 	struct sk_buff *skb_new;
1259 	struct batadv_dat_entry *dat_entry = NULL;
1260 	bool ret = false;
1261 	unsigned short vid;
1262 	int err;
1263 
1264 	if (!atomic_read(&bat_priv->distributed_arp_table))
1265 		goto out;
1266 
1267 	vid = batadv_dat_get_vid(skb, &hdr_size);
1268 
1269 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
1270 	if (type != ARPOP_REQUEST)
1271 		goto out;
1272 
1273 	hw_src = batadv_arp_hw_src(skb, hdr_size);
1274 	ip_src = batadv_arp_ip_src(skb, hdr_size);
1275 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1276 
1277 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REQUEST");
1278 
1279 	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
1280 
1281 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
1282 	if (!dat_entry)
1283 		goto out;
1284 
1285 	skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
1286 					      dat_entry->mac_addr, hw_src, vid);
1287 	if (!skb_new)
1288 		goto out;
1289 
	/* To preserve backwards compatibility, the node has to choose the
	 * outgoing format based on the incoming request packet type. The
	 * assumption is that a node not using the 4addr packet format doesn't
	 * support it.
1293 	 */
1294 	if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
1295 		err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
1296 						   BATADV_P_DAT_CACHE_REPLY,
1297 						   NULL, vid);
1298 	else
1299 		err = batadv_send_skb_via_tt(bat_priv, skb_new, NULL, vid);
1300 
1301 	if (err != NET_XMIT_DROP) {
1302 		batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
1303 		ret = true;
1304 	}
1305 out:
1306 	if (dat_entry)
1307 		batadv_dat_entry_put(dat_entry);
1308 	if (ret)
1309 		kfree_skb(skb);
1310 	return ret;
1311 }
1312 
1313 /**
1314  * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
1315  * @bat_priv: the bat priv with all the soft interface information
1316  * @skb: packet to check
1317  */
1318 void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1319 					 struct sk_buff *skb)
1320 {
1321 	u16 type;
1322 	__be32 ip_src, ip_dst;
1323 	u8 *hw_src, *hw_dst;
1324 	int hdr_size = 0;
1325 	unsigned short vid;
1326 
1327 	if (!atomic_read(&bat_priv->distributed_arp_table))
1328 		return;
1329 
1330 	vid = batadv_dat_get_vid(skb, &hdr_size);
1331 
1332 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
1333 	if (type != ARPOP_REPLY)
1334 		return;
1335 
1336 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REPLY");
1337 
1338 	hw_src = batadv_arp_hw_src(skb, hdr_size);
1339 	ip_src = batadv_arp_ip_src(skb, hdr_size);
1340 	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
1341 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1342 
1343 	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
1344 	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
1345 
1346 	/* Send the ARP reply to the candidates for both the IP addresses that
1347 	 * the node obtained from the ARP reply
1348 	 */
1349 	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
1350 	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
1351 }
1352 
1353 /**
1354  * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
1355  *  local DAT storage only
1356  * @bat_priv: the bat priv with all the soft interface information
1357  * @skb: packet to check
1358  * @hdr_size: size of the encapsulation header
1359  *
1360  * Return: true if the packet was snooped and consumed by DAT. False if the
 * packet has to be delivered to the interface.
1362  */
1363 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1364 					 struct sk_buff *skb, int hdr_size)
1365 {
1366 	struct batadv_dat_entry *dat_entry = NULL;
1367 	u16 type;
1368 	__be32 ip_src, ip_dst;
1369 	u8 *hw_src, *hw_dst;
1370 	bool dropped = false;
1371 	unsigned short vid;
1372 
1373 	if (!atomic_read(&bat_priv->distributed_arp_table))
1374 		goto out;
1375 
1376 	vid = batadv_dat_get_vid(skb, &hdr_size);
1377 
1378 	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
1379 	if (type != ARPOP_REPLY)
1380 		goto out;
1381 
1382 	batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REPLY");
1383 
1384 	hw_src = batadv_arp_hw_src(skb, hdr_size);
1385 	ip_src = batadv_arp_ip_src(skb, hdr_size);
1386 	hw_dst = batadv_arp_hw_dst(skb, hdr_size);
1387 	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1388 
	/* If ip_dst is already in the cache and maps to the right mac address,
	 * drop this frame if this ARP reply is destined for us because it's
	 * most probably an ARP reply generated by another node of the DHT.
	 * We most probably already received a reply earlier. Delivering this
	 * frame would lead to a doubled receive of the ARP reply.
1394 	 */
1395 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_src, vid);
1396 	if (dat_entry && batadv_compare_eth(hw_src, dat_entry->mac_addr)) {
1397 		batadv_dbg(BATADV_DBG_DAT, bat_priv, "Doubled ARP reply removed: ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]; dat_entry: %pM-%pI4\n",
1398 			   hw_src, &ip_src, hw_dst, &ip_dst,
1399 			   dat_entry->mac_addr,	&dat_entry->ip);
1400 		dropped = true;
1401 		goto out;
1402 	}
1403 
1404 	/* Update our internal cache with both the IP addresses the node got
1405 	 * within the ARP reply
1406 	 */
1407 	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
1408 	batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
1409 
1410 	/* If BLA is enabled, only forward ARP replies if we have claimed the
1411 	 * source of the ARP reply or if no one else of the same backbone has
1412 	 * already claimed that client. This prevents that different gateways
1413 	 * to the same backbone all forward the ARP reply leading to multiple
1414 	 * replies in the backbone.
1415 	 */
1416 	if (!batadv_bla_check_claim(bat_priv, hw_src, vid)) {
1417 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
1418 			   "Device %pM claimed by another backbone gw. Drop ARP reply.\n",
1419 			   hw_src);
1420 		dropped = true;
1421 		goto out;
1422 	}
1423 
1424 	/* if this REPLY is directed to a client of mine, let's deliver the
1425 	 * packet to the interface
1426 	 */
1427 	dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
1428 
1429 	/* if this REPLY is sent on behalf of a client of mine, let's drop the
1430 	 * packet because the client will reply by itself
1431 	 */
1432 	dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
1433 out:
1434 	if (dropped)
1435 		kfree_skb(skb);
1436 	if (dat_entry)
1437 		batadv_dat_entry_put(dat_entry);
1438 	/* if dropped == false -> deliver to the interface */
1439 	return dropped;
1440 }
1441 
1442 /**
1443  * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
1444  *  dropped (because the node has already obtained the reply via DAT) or not
1445  * @bat_priv: the bat priv with all the soft interface information
1446  * @forw_packet: the broadcast packet
1447  *
1448  * Return: true if the node can drop the packet, false otherwise.
1449  */
1450 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
1451 				      struct batadv_forw_packet *forw_packet)
1452 {
1453 	u16 type;
1454 	__be32 ip_dst;
1455 	struct batadv_dat_entry *dat_entry = NULL;
1456 	bool ret = false;
1457 	int hdr_size = sizeof(struct batadv_bcast_packet);
1458 	unsigned short vid;
1459 
1460 	if (!atomic_read(&bat_priv->distributed_arp_table))
1461 		goto out;
1462 
1463 	/* If this packet is an ARP_REQUEST and the node already has the
1464 	 * information that it is going to ask, then the packet can be dropped
1465 	 */
1466 	if (batadv_forw_packet_is_rebroadcast(forw_packet))
1467 		goto out;
1468 
1469 	vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
1470 
1471 	type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
1472 	if (type != ARPOP_REQUEST)
1473 		goto out;
1474 
1475 	ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
1476 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
1477 	/* check if the node already got this entry */
1478 	if (!dat_entry) {
1479 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
1480 			   "ARP Request for %pI4: fallback\n", &ip_dst);
1481 		goto out;
1482 	}
1483 
1484 	batadv_dbg(BATADV_DBG_DAT, bat_priv,
1485 		   "ARP Request for %pI4: fallback prevented\n", &ip_dst);
1486 	ret = true;
1487 
1488 out:
1489 	if (dat_entry)
1490 		batadv_dat_entry_put(dat_entry);
1491 	return ret;
1492 }
1493