1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  */
6 
7 #include "soft-interface.h"
8 #include "main.h"
9 
10 #include <linux/atomic.h>
11 #include <linux/byteorder/generic.h>
12 #include <linux/cache.h>
13 #include <linux/compiler.h>
14 #include <linux/container_of.h>
15 #include <linux/cpumask.h>
16 #include <linux/errno.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/gfp.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
22 #include <linux/jiffies.h>
23 #include <linux/kref.h>
24 #include <linux/list.h>
25 #include <linux/lockdep.h>
26 #include <linux/netdevice.h>
27 #include <linux/netlink.h>
28 #include <linux/percpu.h>
29 #include <linux/random.h>
30 #include <linux/rculist.h>
31 #include <linux/rcupdate.h>
32 #include <linux/skbuff.h>
33 #include <linux/slab.h>
34 #include <linux/socket.h>
35 #include <linux/spinlock.h>
36 #include <linux/stddef.h>
37 #include <linux/string.h>
38 #include <linux/types.h>
39 #include <net/net_namespace.h>
40 #include <net/netlink.h>
41 #include <uapi/linux/batadv_packet.h>
42 #include <uapi/linux/batman_adv.h>
43 
44 #include "bat_algo.h"
45 #include "bridge_loop_avoidance.h"
46 #include "distributed-arp-table.h"
47 #include "gateway_client.h"
48 #include "hard-interface.h"
49 #include "multicast.h"
50 #include "network-coding.h"
51 #include "send.h"
52 #include "translation-table.h"
53 
54 /**
55  * batadv_skb_head_push() - Increase header size and move (push) head pointer
56  * @skb: packet buffer which should be modified
57  * @len: number of bytes to add
58  *
59  * Return: 0 on success or negative error number in case of failure
60  */
61 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
62 {
63 	int result;
64 
65 	/* TODO: We must check if we can release all references to non-payload
66 	 * data using __skb_header_release in our skbs to allow skb_cow_head
67 	 * to work optimally. This means that those skbs are not allowed to read
68 	 * or write any data which is before the current position of skb->data
69 	 * after that call and thus allow other skbs with the same data buffer
70 	 * to write freely in that area.
71 	 */
72 	result = skb_cow_head(skb, len);
73 	if (result < 0)
74 		return result;
75 
76 	skb_push(skb, len);
77 	return 0;
78 }
79 
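/**
 * batadv_interface_open() - ndo_open handler of the batman-adv soft interface
 * @dev: soft interface to bring up
 *
 * Return: always 0
 */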
80 static int batadv_interface_open(struct net_device *dev)
81 {
82 	netif_start_queue(dev);
83 	return 0;
84 }
85 
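/**
 * batadv_interface_release() - ndo_stop handler of the batman-adv soft interface
 * @dev: soft interface to shut down
 *
 * Return: always 0
 */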
86 static int batadv_interface_release(struct net_device *dev)
87 {
88 	netif_stop_queue(dev);
89 	return 0;
90 }
91 
92 /**
93  * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
94  * @bat_priv: the bat priv with all the soft interface information
95  * @idx: index of counter to sum up
96  *
97  * Return: sum of all cpu-local counters
98  */
99 static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
100 {
101 	u64 *counters, sum = 0;
102 	int cpu;
103 
104 	for_each_possible_cpu(cpu) {
105 		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
106 		sum += counters[idx];
107 	}
108 
109 	return sum;
110 }
111 
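/**
 * batadv_interface_stats() - collect the per-cpu counters into the device stats
 * @dev: soft interface whose statistics are requested
 *
 * Return: pointer to the statistics embedded in @dev
 */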
112 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
113 {
114 	struct batadv_priv *bat_priv = netdev_priv(dev);
115 	struct net_device_stats *stats = &dev->stats;
116 
117 	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
118 	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
119 	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
120 	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
121 	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
122 	return stats;
123 }
124 
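/**
 * batadv_interface_set_mac_addr() - change the MAC address of the soft interface
 * @dev: soft interface to modify
 * @p: sockaddr carrying the new hardware address
 *
 * If the mesh is already active, the old address is removed from and the new
 * address is added to the translation table for every configured VLAN.
 *
 * Return: 0 on success, -EADDRNOTAVAIL if the new address is not a valid
 * ethernet address
 */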
125 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
126 {
127 	struct batadv_priv *bat_priv = netdev_priv(dev);
128 	struct batadv_softif_vlan *vlan;
129 	struct sockaddr *addr = p;
130 	u8 old_addr[ETH_ALEN];
131 
132 	if (!is_valid_ether_addr(addr->sa_data))
133 		return -EADDRNOTAVAIL;
134 
135 	ether_addr_copy(old_addr, dev->dev_addr);
136 	eth_hw_addr_set(dev, addr->sa_data);
137 
138 	/* only modify transtable if it has been initialized before */
139 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
140 		return 0;
141 
142 	rcu_read_lock();
143 	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
144 		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
145 				       "mac address changed", false);
146 		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
147 				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
148 	}
149 	rcu_read_unlock();
150 
151 	return 0;
152 }
153 
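/**
 * batadv_interface_change_mtu() - change the MTU of the soft interface
 * @dev: soft interface to modify
 * @new_mtu: requested MTU
 *
 * Return: 0 on success, -EINVAL if @new_mtu is outside the supported range
 */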
154 static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
155 {
156 	struct batadv_priv *bat_priv = netdev_priv(dev);
157 
158 	/* check ranges */
159 	if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
160 		return -EINVAL;
161 
162 	dev->mtu = new_mtu;
163 	bat_priv->mtu_set_by_user = new_mtu;
164 
165 	return 0;
166 }
167 
168 /**
169  * batadv_interface_set_rx_mode() - set the rx mode of a device
170  * @dev: registered network device to modify
171  *
172  * We do not actually need to set any rx filters for the virtual batman
173  * soft interface. However, a dummy handler enables a user to set static
174  * multicast listeners, for instance.
175  */
176 static void batadv_interface_set_rx_mode(struct net_device *dev)
177 {
178 }
179 
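/**
 * batadv_interface_tx() - ndo_start_xmit handler of the soft interface
 * @skb: ethernet frame handed over by the network stack
 * @soft_iface: soft interface the frame was sent on
 *
 * Classifies the frame (broadcast vs. unicast, DHCP, multicast forwarding
 * mode), lets DAT and the bridge loop avoidance inspect it and hands it over
 * to the matching batman-adv transmit helper.
 *
 * Return: NETDEV_TX_OK in all cases; the skb is either sent or freed
 */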
180 static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
181 				       struct net_device *soft_iface)
182 {
183 	struct ethhdr *ethhdr;
184 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
185 	struct batadv_hard_iface *primary_if = NULL;
186 	struct batadv_bcast_packet *bcast_packet;
187 	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
188 					      0x00, 0x00};
189 	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
190 					       0x00, 0x00};
191 	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
192 	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
193 	struct vlan_ethhdr *vhdr;
194 	unsigned int header_len = 0;
195 	int data_len = skb->len, ret;
196 	unsigned long brd_delay = 0;
197 	bool do_bcast = false, client_added;
198 	unsigned short vid;
199 	u32 seqno;
200 	int gw_mode;
201 	enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
202 	int mcast_is_routable = 0;
203 	int network_offset = ETH_HLEN;
204 	__be16 proto;
205 
206 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
207 		goto dropped;
208 
209 	/* reset control block to avoid leftovers from previous users */
210 	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
211 
212 	netif_trans_update(soft_iface);
213 	vid = batadv_get_vid(skb, 0);
214 
215 	skb_reset_mac_header(skb);
216 	ethhdr = eth_hdr(skb);
217 
218 	proto = ethhdr->h_proto;
219 
220 	switch (ntohs(proto)) {
221 	case ETH_P_8021Q:
222 		if (!pskb_may_pull(skb, sizeof(*vhdr)))
223 			goto dropped;
224 		vhdr = vlan_eth_hdr(skb);
225 		proto = vhdr->h_vlan_encapsulated_proto;
226 
227 		/* drop batman-in-batman packets to prevent loops */
228 		if (proto != htons(ETH_P_BATMAN)) {
229 			network_offset += VLAN_HLEN;
230 			break;
231 		}
232 
233 		fallthrough;
234 	case ETH_P_BATMAN:
235 		goto dropped;
236 	}
237 
238 	skb_set_network_header(skb, network_offset);
239 
240 	if (batadv_bla_tx(bat_priv, skb, vid))
241 		goto dropped;
242 
243 	/* skb->data might have been reallocated by batadv_bla_tx() */
244 	ethhdr = eth_hdr(skb);
245 
246 	/* Register the client MAC in the transtable */
247 	if (!is_multicast_ether_addr(ethhdr->h_source) &&
248 	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
249 		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
250 						   vid, skb->skb_iif,
251 						   skb->mark);
252 		if (!client_added)
253 			goto dropped;
254 	}
255 
256 	/* Snoop address candidates from DHCPACKs for early DAT filling */
257 	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
258 
259 	/* don't accept STP packets. STP does not help in meshes;
260 	 * better use the bridge loop avoidance ...
261 	 *
262 	 * The same goes for ECTP, sent at least by some Cisco switches,
263 	 * as it might confuse the mesh when used with bridge loop avoidance.
264 	 */
265 	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
266 		goto dropped;
267 
268 	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
269 		goto dropped;
270 
271 	gw_mode = atomic_read(&bat_priv->gw.mode);
272 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
273 		/* if gw mode is off, broadcast every packet */
274 		if (gw_mode == BATADV_GW_MODE_OFF) {
275 			do_bcast = true;
276 			goto send;
277 		}
278 
279 		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
280 							chaddr);
281 		/* skb->data may have been modified by
282 		 * batadv_gw_dhcp_recipient_get()
283 		 */
284 		ethhdr = eth_hdr(skb);
285 		/* if gw_mode is on, broadcast any non-DHCP message.
286 		 * All the DHCP packets are going to be sent as unicast
287 		 */
288 		if (dhcp_rcp == BATADV_DHCP_NO) {
289 			do_bcast = true;
290 			goto send;
291 		}
292 
293 		if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
294 			dst_hint = chaddr;
295 		else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
296 			 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
297 			/* gateways should not forward any DHCP message if
298 			 * directed to a DHCP server
299 			 */
300 			goto dropped;
301 
302 send:
303 		if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
304 			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
305 							   &mcast_is_routable);
306 			switch (forw_mode) {
307 			case BATADV_FORW_BCAST:
308 				break;
309 			case BATADV_FORW_UCASTS:
310 				do_bcast = false;
311 				break;
312 			case BATADV_FORW_NONE:
313 				fallthrough;
314 			default:
315 				goto dropped;
316 			}
317 		}
318 	}
319 
320 	batadv_skb_set_priority(skb, 0);
321 
322 	/* the ethernet packet should be broadcast */
323 	if (do_bcast) {
324 		primary_if = batadv_primary_if_get_selected(bat_priv);
325 		if (!primary_if)
326 			goto dropped;
327 
328 		/* in case of ARP request, we do not immediately broadcast the
329 		 * packet; instead we first wait for DAT to try to retrieve the
330 		 * correct ARP entry
331 		 */
332 		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
333 			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
334 
335 		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
336 			goto dropped;
337 
338 		bcast_packet = (struct batadv_bcast_packet *)skb->data;
339 		bcast_packet->version = BATADV_COMPAT_VERSION;
340 		bcast_packet->ttl = BATADV_TTL - 1;
341 
342 		/* batman packet type: broadcast */
343 		bcast_packet->packet_type = BATADV_BCAST;
344 		bcast_packet->reserved = 0;
345 
346 		/* hw address of first interface is the orig mac because only
347 		 * this mac is known throughout the mesh
348 		 */
349 		ether_addr_copy(bcast_packet->orig,
350 				primary_if->net_dev->dev_addr);
351 
352 		/* set broadcast sequence number */
353 		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
354 		bcast_packet->seqno = htonl(seqno);
355 
356 		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
357 	/* unicast packet */
358 	} else {
359 		/* DHCP packets going to a server will use the GW feature */
360 		if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
361 			ret = batadv_gw_out_of_range(bat_priv, skb);
362 			if (ret)
363 				goto dropped;
364 			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
365 		} else if (forw_mode == BATADV_FORW_UCASTS) {
366 			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
367 						     mcast_is_routable);
368 		} else {
369 			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
370 								  skb))
371 				goto dropped;
372 
373 			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
374 
375 			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
376 						     vid);
377 		}
378 		if (ret != NET_XMIT_SUCCESS)
379 			goto dropped_freed;
380 	}
381 
382 	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
383 	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
384 	goto end;
385 
386 dropped:
387 	kfree_skb(skb);
388 dropped_freed:
389 	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
390 end:
391 	batadv_hardif_put(primary_if);
392 	return NETDEV_TX_OK;
393 }
394 
395 /**
396  * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
397  * @soft_iface: local interface which will receive the ethernet frame
398  * @skb: ethernet frame for @soft_iface
399  * @hdr_size: size of already parsed batman-adv header
400  * @orig_node: originator from which the batman-adv packet was sent
401  *
402  * Sends an ethernet frame to the receive path of the local @soft_iface.
403  * skb->data still points to the batman-adv header with the size @hdr_size.
404  * The caller has to have parsed this header already and made sure that at
405  * least @hdr_size bytes are still available for pull in @skb.
406  *
407  * The packet may still get dropped. This can happen when the encapsulated
408  * ethernet frame is invalid or again contains a batman-adv packet. Also,
409  * unicast packets will be dropped directly when they were sent between two
410  * isolated clients.
411  */
412 void batadv_interface_rx(struct net_device *soft_iface,
413 			 struct sk_buff *skb, int hdr_size,
414 			 struct batadv_orig_node *orig_node)
415 {
416 	struct batadv_bcast_packet *batadv_bcast_packet;
417 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
418 	struct vlan_ethhdr *vhdr;
419 	struct ethhdr *ethhdr;
420 	unsigned short vid;
421 	int packet_type;
422 
423 	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
424 	packet_type = batadv_bcast_packet->packet_type;
425 
426 	skb_pull_rcsum(skb, hdr_size);
427 	skb_reset_mac_header(skb);
428 
429 	/* clean the netfilter state now that the batman-adv header has been
430 	 * removed
431 	 */
432 	nf_reset_ct(skb);
433 
434 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
435 		goto dropped;
436 
437 	vid = batadv_get_vid(skb, 0);
438 	ethhdr = eth_hdr(skb);
439 
440 	switch (ntohs(ethhdr->h_proto)) {
441 	case ETH_P_8021Q:
442 		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
443 			goto dropped;
444 
445 		vhdr = skb_vlan_eth_hdr(skb);
446 
447 		/* drop batman-in-batman packets to prevent loops */
448 		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
449 			break;
450 
451 		fallthrough;
452 	case ETH_P_BATMAN:
453 		goto dropped;
454 	}
455 
456 	/* skb->dev & skb->pkt_type are set here */
457 	skb->protocol = eth_type_trans(skb, soft_iface);
458 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
459 
460 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
461 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
462 			   skb->len + ETH_HLEN);
463 
464 	/* Let the bridge loop avoidance check the packet. If it will
465 	 * not handle it, we can safely push it up.
466 	 */
467 	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
468 		goto out;
469 
470 	if (orig_node)
471 		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
472 						     ethhdr->h_source, vid);
473 
474 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
475 		/* set the mark on multicast packets if AP isolation is ON and
476 		 * the packet is coming from an "isolated" client
477 		 */
478 		if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
479 		    batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
480 						 vid)) {
481 			/* save bits in skb->mark not covered by the mask and
482 			 * apply the mark on the rest
483 			 */
484 			skb->mark &= ~bat_priv->isolation_mark_mask;
485 			skb->mark |= bat_priv->isolation_mark;
486 		}
487 	} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
488 					 ethhdr->h_dest, vid)) {
489 		goto dropped;
490 	}
491 
492 	netif_rx(skb);
493 	goto out;
494 
495 dropped:
496 	kfree_skb(skb);
497 out:
498 	return;
499 }
500 
501 /**
502  * batadv_softif_vlan_release() - release vlan from lists and queue for free
503  *  after rcu grace period
504  * @ref: kref pointer of the vlan object
505  */
506 void batadv_softif_vlan_release(struct kref *ref)
507 {
508 	struct batadv_softif_vlan *vlan;
509 
510 	vlan = container_of(ref, struct batadv_softif_vlan, refcount);
511 
512 	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
513 	hlist_del_rcu(&vlan->list);
514 	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
515 
516 	kfree_rcu(vlan, rcu);
517 }
518 
519 /**
520  * batadv_softif_vlan_get() - get the vlan object for a specific vid
521  * @bat_priv: the bat priv with all the soft interface information
522  * @vid: the identifier of the vlan object to retrieve
523  *
524  * Return: the private data of the vlan matching the vid passed as argument or
525  * NULL otherwise. The refcounter of the returned object is incremented by 1.
526  */
527 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
528 						  unsigned short vid)
529 {
530 	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
531 
532 	rcu_read_lock();
533 	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
534 		if (vlan_tmp->vid != vid)
535 			continue;
536 
537 		if (!kref_get_unless_zero(&vlan_tmp->refcount))
538 			continue;
539 
540 		vlan = vlan_tmp;
541 		break;
542 	}
543 	rcu_read_unlock();
544 
545 	return vlan;
546 }
547 
548 /**
549  * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
550  * @bat_priv: the bat priv with all the soft interface information
551  * @vid: the VLAN identifier
552  *
553  * Return: 0 on success, a negative error otherwise.
554  */
555 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
556 {
557 	struct batadv_softif_vlan *vlan;
558 
559 	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
560 
561 	vlan = batadv_softif_vlan_get(bat_priv, vid);
562 	if (vlan) {
563 		batadv_softif_vlan_put(vlan);
564 		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
565 		return -EEXIST;
566 	}
567 
568 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
569 	if (!vlan) {
570 		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
571 		return -ENOMEM;
572 	}
573 
574 	vlan->bat_priv = bat_priv;
575 	vlan->vid = vid;
576 	kref_init(&vlan->refcount);
577 
578 	atomic_set(&vlan->ap_isolation, 0);
579 
580 	kref_get(&vlan->refcount);
581 	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
582 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
583 
584 	/* add a new TT local entry. This one will be marked with the NOPURGE
585 	 * flag
586 	 */
587 	batadv_tt_local_add(bat_priv->soft_iface,
588 			    bat_priv->soft_iface->dev_addr, vid,
589 			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
590 
591 	/* don't return reference to new softif_vlan */
592 	batadv_softif_vlan_put(vlan);
593 
594 	return 0;
595 }
596 
597 /**
598  * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
599  * @bat_priv: the bat priv with all the soft interface information
600  * @vlan: the object to remove
601  */
602 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
603 				       struct batadv_softif_vlan *vlan)
604 {
605 	/* explicitly remove the associated TT local entry because it is marked
606 	 * with the NOPURGE flag
607 	 */
608 	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
609 			       vlan->vid, "vlan interface destroyed", false);
610 
611 	batadv_softif_vlan_put(vlan);
612 }
613 
614 /**
615  * batadv_interface_add_vid() - ndo_add_vid API implementation
616  * @dev: the netdev of the mesh interface
617  * @proto: protocol of the vlan id
618  * @vid: identifier of the new vlan
619  *
620  * Set up all the internal structures for handling the new vlan on top of the
621  * mesh interface
622  *
623  * Return: 0 on success or a negative error code in case of failure.
624  */
625 static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
626 				    unsigned short vid)
627 {
628 	struct batadv_priv *bat_priv = netdev_priv(dev);
629 	struct batadv_softif_vlan *vlan;
630 
631 	/* only 802.1Q vlans are supported.
632 	 * batman-adv does not know how to handle other types
633 	 */
634 	if (proto != htons(ETH_P_8021Q))
635 		return -EINVAL;
636 
637 	vid |= BATADV_VLAN_HAS_TAG;
638 
639 	/* if a new vlan is getting created and it already exists, it means that
640 	 * it was not deleted yet. batadv_softif_vlan_get() increases the
641 	 * refcount in order to revive the object.
642 	 *
643 	 * if it does not exist then create it.
644 	 */
645 	vlan = batadv_softif_vlan_get(bat_priv, vid);
646 	if (!vlan)
647 		return batadv_softif_create_vlan(bat_priv, vid);
648 
649 	/* add a new TT local entry. This one will be marked with the NOPURGE
650 	 * flag. This must be added again, even if the vlan object already
651 	 * exists, because the entry was deleted by kill_vid()
652 	 */
653 	batadv_tt_local_add(bat_priv->soft_iface,
654 			    bat_priv->soft_iface->dev_addr, vid,
655 			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
656 
657 	return 0;
658 }
659 
660 /**
661  * batadv_interface_kill_vid() - ndo_kill_vid API implementation
662  * @dev: the netdev of the mesh interface
663  * @proto: protocol of the vlan id
664  * @vid: identifier of the deleted vlan
665  *
666  * Destroy all the internal structures used to handle the vlan identified by vid
667  * on top of the mesh interface
668  *
669  * Return: 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
670  * or -ENOENT if the specified vlan id wasn't registered.
671  */
672 static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
673 				     unsigned short vid)
674 {
675 	struct batadv_priv *bat_priv = netdev_priv(dev);
676 	struct batadv_softif_vlan *vlan;
677 
678 	/* only 802.1Q vlans are supported. batman-adv does not know how to
679 	 * handle other types
680 	 */
681 	if (proto != htons(ETH_P_8021Q))
682 		return -EINVAL;
683 
684 	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
685 	if (!vlan)
686 		return -ENOENT;
687 
688 	batadv_softif_destroy_vlan(bat_priv, vlan);
689 
690 	/* finally free the vlan object */
691 	batadv_softif_vlan_put(vlan);
692 
693 	return 0;
694 }
695 
696 /* batman-adv network devices have devices nesting below them and are a special
697  * "super class" of normal network devices; split their locks off into a
698  * separate class since they always nest.
699  */
700 static struct lock_class_key batadv_netdev_xmit_lock_key;
701 static struct lock_class_key batadv_netdev_addr_lock_key;
702 
703 /**
704  * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
705  * @dev: device which owns the tx queue
706  * @txq: tx queue to modify
707  * @_unused: always NULL
708  */
709 static void batadv_set_lockdep_class_one(struct net_device *dev,
710 					 struct netdev_queue *txq,
711 					 void *_unused)
712 {
713 	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
714 }
715 
716 /**
717  * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
718  * @dev: network device to modify
719  */
720 static void batadv_set_lockdep_class(struct net_device *dev)
721 {
722 	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
723 	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
724 }
725 
726 /**
727  * batadv_softif_init_late() - late stage initialization of soft interface
728  * @dev: registered network device to modify
729  *
730  * Return: 0 on success or a negative error code on failure
731  */
732 static int batadv_softif_init_late(struct net_device *dev)
733 {
734 	struct batadv_priv *bat_priv;
735 	u32 random_seqno;
736 	int ret;
737 	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
738 
739 	batadv_set_lockdep_class(dev);
740 
741 	bat_priv = netdev_priv(dev);
742 	bat_priv->soft_iface = dev;
743 
744 	/* batadv_interface_stats() needs to be available as soon as
745 	 * register_netdevice() has been called
746 	 */
747 	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
748 	if (!bat_priv->bat_counters)
749 		return -ENOMEM;
750 
751 	atomic_set(&bat_priv->aggregated_ogms, 1);
752 	atomic_set(&bat_priv->bonding, 0);
753 #ifdef CONFIG_BATMAN_ADV_BLA
754 	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
755 #endif
756 #ifdef CONFIG_BATMAN_ADV_DAT
757 	atomic_set(&bat_priv->distributed_arp_table, 1);
758 #endif
759 #ifdef CONFIG_BATMAN_ADV_MCAST
760 	atomic_set(&bat_priv->multicast_mode, 1);
761 	atomic_set(&bat_priv->multicast_fanout, 16);
762 	atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
763 	atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
764 	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
765 #endif
766 	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
767 	atomic_set(&bat_priv->gw.bandwidth_down, 100);
768 	atomic_set(&bat_priv->gw.bandwidth_up, 20);
769 	atomic_set(&bat_priv->orig_interval, 1000);
770 	atomic_set(&bat_priv->hop_penalty, 30);
771 #ifdef CONFIG_BATMAN_ADV_DEBUG
772 	atomic_set(&bat_priv->log_level, 0);
773 #endif
774 	atomic_set(&bat_priv->fragmentation, 1);
775 	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
776 	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
777 	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
778 
779 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
780 	atomic_set(&bat_priv->bcast_seqno, 1);
781 	atomic_set(&bat_priv->tt.vn, 0);
782 	atomic_set(&bat_priv->tt.local_changes, 0);
783 	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
784 #ifdef CONFIG_BATMAN_ADV_BLA
785 	atomic_set(&bat_priv->bla.num_requests, 0);
786 #endif
787 	atomic_set(&bat_priv->tp_num, 0);
788 
789 	bat_priv->tt.last_changeset = NULL;
790 	bat_priv->tt.last_changeset_len = 0;
791 	bat_priv->isolation_mark = 0;
792 	bat_priv->isolation_mark_mask = 0;
793 
794 	/* randomize initial seqno to avoid collision */
795 	get_random_bytes(&random_seqno, sizeof(random_seqno));
796 	atomic_set(&bat_priv->frag_seqno, random_seqno);
797 
798 	bat_priv->primary_if = NULL;
799 
800 	batadv_nc_init_bat_priv(bat_priv);
801 
802 	if (!bat_priv->algo_ops) {
803 		ret = batadv_algo_select(bat_priv, batadv_routing_algo);
804 		if (ret < 0)
805 			goto free_bat_counters;
806 	}
807 
808 	ret = batadv_mesh_init(dev);
809 	if (ret < 0)
810 		goto free_bat_counters;
811 
812 	return 0;
813 
814 free_bat_counters:
815 	free_percpu(bat_priv->bat_counters);
816 	bat_priv->bat_counters = NULL;
817 
818 	return ret;
819 }
820 
821 /**
822  * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
823  * @dev: batadv_soft_interface used as master interface
824  * @slave_dev: net_device which should become the slave interface
825  * @extack: extended ACK report struct
826  *
827  * Return: 0 if successful or error otherwise.
828  */
829 static int batadv_softif_slave_add(struct net_device *dev,
830 				   struct net_device *slave_dev,
831 				   struct netlink_ext_ack *extack)
832 {
833 	struct batadv_hard_iface *hard_iface;
834 	int ret = -EINVAL;
835 
836 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
837 	if (!hard_iface || hard_iface->soft_iface)
838 		goto out;
839 
840 	ret = batadv_hardif_enable_interface(hard_iface, dev);
841 
842 out:
843 	batadv_hardif_put(hard_iface);
844 	return ret;
845 }
846 
847 /**
848  * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
849  * @dev: batadv_soft_interface used as master interface
850  * @slave_dev: net_device which should be removed from the master interface
851  *
852  * Return: 0 if successful or error otherwise.
853  */
854 static int batadv_softif_slave_del(struct net_device *dev,
855 				   struct net_device *slave_dev)
856 {
857 	struct batadv_hard_iface *hard_iface;
858 	int ret = -EINVAL;
859 
860 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
861 
862 	if (!hard_iface || hard_iface->soft_iface != dev)
863 		goto out;
864 
865 	batadv_hardif_disable_interface(hard_iface);
866 	ret = 0;
867 
868 out:
869 	batadv_hardif_put(hard_iface);
870 	return ret;
871 }
872 
873 static const struct net_device_ops batadv_netdev_ops = {
874 	.ndo_init = batadv_softif_init_late,
875 	.ndo_open = batadv_interface_open,
876 	.ndo_stop = batadv_interface_release,
877 	.ndo_get_stats = batadv_interface_stats,
878 	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
879 	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
880 	.ndo_set_mac_address = batadv_interface_set_mac_addr,
881 	.ndo_change_mtu = batadv_interface_change_mtu,
882 	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
883 	.ndo_start_xmit = batadv_interface_tx,
884 	.ndo_validate_addr = eth_validate_addr,
885 	.ndo_add_slave = batadv_softif_slave_add,
886 	.ndo_del_slave = batadv_softif_slave_del,
887 };
888 
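/**
 * batadv_get_drvinfo() - ethtool get_drvinfo handler of the soft interface
 * @dev: soft interface which is queried
 * @info: buffer to fill with driver name, version and bus information
 */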
889 static void batadv_get_drvinfo(struct net_device *dev,
890 			       struct ethtool_drvinfo *info)
891 {
892 	strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
893 	strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
894 	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
895 	strscpy(info->bus_info, "batman", sizeof(info->bus_info));
896 }
897 
898 /* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
899  * Declare each description string in struct.name[] to get a fixed-size buffer
900  * and compile-time checking for strings longer than ETH_GSTRING_LEN.
901  */
902 static const struct {
903 	const char name[ETH_GSTRING_LEN];
904 } batadv_counters_strings[] = {
905 	{ "tx" },
906 	{ "tx_bytes" },
907 	{ "tx_dropped" },
908 	{ "rx" },
909 	{ "rx_bytes" },
910 	{ "forward" },
911 	{ "forward_bytes" },
912 	{ "mgmt_tx" },
913 	{ "mgmt_tx_bytes" },
914 	{ "mgmt_rx" },
915 	{ "mgmt_rx_bytes" },
916 	{ "frag_tx" },
917 	{ "frag_tx_bytes" },
918 	{ "frag_rx" },
919 	{ "frag_rx_bytes" },
920 	{ "frag_fwd" },
921 	{ "frag_fwd_bytes" },
922 	{ "tt_request_tx" },
923 	{ "tt_request_rx" },
924 	{ "tt_response_tx" },
925 	{ "tt_response_rx" },
926 	{ "tt_roam_adv_tx" },
927 	{ "tt_roam_adv_rx" },
928 #ifdef CONFIG_BATMAN_ADV_DAT
929 	{ "dat_get_tx" },
930 	{ "dat_get_rx" },
931 	{ "dat_put_tx" },
932 	{ "dat_put_rx" },
933 	{ "dat_cached_reply_tx" },
934 #endif
935 #ifdef CONFIG_BATMAN_ADV_NC
936 	{ "nc_code" },
937 	{ "nc_code_bytes" },
938 	{ "nc_recode" },
939 	{ "nc_recode_bytes" },
940 	{ "nc_buffer" },
941 	{ "nc_decode" },
942 	{ "nc_decode_bytes" },
943 	{ "nc_decode_failed" },
944 	{ "nc_sniffed" },
945 #endif
946 };
947 
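/**
 * batadv_get_strings() - ethtool get_strings handler returning the counter names
 * @dev: soft interface which is queried
 * @stringset: requested string set (only ETH_SS_STATS is supported)
 * @data: buffer receiving the counter names
 */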
948 static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
949 {
950 	if (stringset == ETH_SS_STATS)
951 		memcpy(data, batadv_counters_strings,
952 		       sizeof(batadv_counters_strings));
953 }
954 
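/**
 * batadv_get_ethtool_stats() - ethtool get_ethtool_stats handler
 * @dev: soft interface which is queried
 * @stats: ethtool stats header (unused)
 * @data: buffer receiving one summed value per counter
 */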
955 static void batadv_get_ethtool_stats(struct net_device *dev,
956 				     struct ethtool_stats *stats, u64 *data)
957 {
958 	struct batadv_priv *bat_priv = netdev_priv(dev);
959 	int i;
960 
961 	for (i = 0; i < BATADV_CNT_NUM; i++)
962 		data[i] = batadv_sum_counter(bat_priv, i);
963 }
964 
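/**
 * batadv_get_sset_count() - ethtool get_sset_count handler
 * @dev: soft interface which is queried
 * @stringset: requested string set
 *
 * Return: number of counters for ETH_SS_STATS, -EOPNOTSUPP otherwise
 */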
965 static int batadv_get_sset_count(struct net_device *dev, int stringset)
966 {
967 	if (stringset == ETH_SS_STATS)
968 		return BATADV_CNT_NUM;
969 
970 	return -EOPNOTSUPP;
971 }
972 
973 static const struct ethtool_ops batadv_ethtool_ops = {
974 	.get_drvinfo = batadv_get_drvinfo,
975 	.get_link = ethtool_op_get_link,
976 	.get_strings = batadv_get_strings,
977 	.get_ethtool_stats = batadv_get_ethtool_stats,
978 	.get_sset_count = batadv_get_sset_count,
979 };
980 
981 /**
982  * batadv_softif_free() - Destructor of batadv_soft_interface
983  * @dev: Device to cleanup and remove
984  */
985 static void batadv_softif_free(struct net_device *dev)
986 {
987 	batadv_mesh_free(dev);
988 
989 	/* some scheduled RCU callbacks need the bat_priv struct to accomplish
990 	 * their tasks. Wait for them all to be finished before freeing the
991 	 * netdev and its private data (bat_priv)
992 	 */
993 	rcu_barrier();
994 }
995 
996 /**
997  * batadv_softif_init_early() - early stage initialization of soft interface
998  * @dev: registered network device to modify
999  */
1000 static void batadv_softif_init_early(struct net_device *dev)
1001 {
1002 	ether_setup(dev);
1003 
1004 	dev->netdev_ops = &batadv_netdev_ops;
1005 	dev->needs_free_netdev = true;
1006 	dev->priv_destructor = batadv_softif_free;
1007 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
1008 	dev->features |= NETIF_F_LLTX;
1009 	dev->priv_flags |= IFF_NO_QUEUE;
1010 
1011 	/* can't call min_mtu, because the needed variables
1012 	 * have not been initialized yet
1013 	 */
1014 	dev->mtu = ETH_DATA_LEN;
1015 
1016 	/* generate random address */
1017 	eth_hw_addr_random(dev);
1018 
1019 	dev->ethtool_ops = &batadv_ethtool_ops;
1020 }
1021 
1022 /**
1023  * batadv_softif_validate() - validate configuration of new batadv link
1024  * @tb: IFLA_INFO_DATA netlink attributes
1025  * @data: enum batadv_ifla_attrs attributes
1026  * @extack: extended ACK report struct
1027  *
1028  * Return: 0 if successful or error otherwise.
1029  */
1030 static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
1031 				  struct netlink_ext_ack *extack)
1032 {
1033 	struct batadv_algo_ops *algo_ops;
1034 
1035 	if (!data)
1036 		return 0;
1037 
1038 	if (data[IFLA_BATADV_ALGO_NAME]) {
1039 		algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
1040 		if (!algo_ops)
1041 			return -EINVAL;
1042 	}
1043 
1044 	return 0;
1045 }
1046 
1047 /**
1048  * batadv_softif_newlink() - pre-initialize and register new batadv link
1049  * @src_net: the applicable net namespace
1050  * @dev: network device to register
1051  * @tb: IFLA_INFO_DATA netlink attributes
1052  * @data: enum batadv_ifla_attrs attributes
1053  * @extack: extended ACK report struct
1054  *
1055  * Return: 0 if successful or error otherwise.
1056  */
1057 static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
1058 				 struct nlattr *tb[], struct nlattr *data[],
1059 				 struct netlink_ext_ack *extack)
1060 {
1061 	struct batadv_priv *bat_priv = netdev_priv(dev);
1062 	const char *algo_name;
1063 	int err;
1064 
1065 	if (data && data[IFLA_BATADV_ALGO_NAME]) {
1066 		algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
1067 		err = batadv_algo_select(bat_priv, algo_name);
1068 		if (err)
1069 			return -EINVAL;
1070 	}
1071 
1072 	return register_netdevice(dev);
1073 }
1074 
1075 /**
1076  * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
1077  *  netlink
1078  * @soft_iface: the to-be-removed batman-adv interface
1079  * @head: list pointer
1080  */
1081 static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
1082 					  struct list_head *head)
1083 {
1084 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
1085 	struct batadv_hard_iface *hard_iface;
1086 	struct batadv_softif_vlan *vlan;
1087 
1088 	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
1089 		if (hard_iface->soft_iface == soft_iface)
1090 			batadv_hardif_disable_interface(hard_iface);
1091 	}
1092 
1093 	/* destroy the "untagged" VLAN */
1094 	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
1095 	if (vlan) {
1096 		batadv_softif_destroy_vlan(bat_priv, vlan);
1097 		batadv_softif_vlan_put(vlan);
1098 	}
1099 
1100 	unregister_netdevice_queue(soft_iface, head);
1101 }
1102 
1103 /**
1104  * batadv_softif_is_valid() - Check whether device is a batadv soft interface
1105  * @net_dev: device which should be checked
1106  *
1107  * Return: true when net_dev is a batman-adv interface, false otherwise
1108  */
1109 bool batadv_softif_is_valid(const struct net_device *net_dev)
1110 {
1111 	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
1112 		return true;
1113 
1114 	return false;
1115 }
1116 
1117 static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
1118 	[IFLA_BATADV_ALGO_NAME]	= { .type = NLA_NUL_STRING },
1119 };
1120 
1121 struct rtnl_link_ops batadv_link_ops __read_mostly = {
1122 	.kind		= "batadv",
1123 	.priv_size	= sizeof(struct batadv_priv),
1124 	.setup		= batadv_softif_init_early,
1125 	.maxtype	= IFLA_BATADV_MAX,
1126 	.policy		= batadv_ifla_policy,
1127 	.validate	= batadv_softif_validate,
1128 	.newlink	= batadv_softif_newlink,
1129 	.dellink	= batadv_softif_destroy_netlink,
1130 };
1131