// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "soft-interface.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "send.h"
#include "translation-table.h"

/**
 * batadv_skb_head_push() - Increase header size and move (push) head pointer
 * @skb: packet buffer which should be modified
 * @len: number of bytes to add
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int result;

	/* TODO: We must check if we can release all references to non-payload
	 * data using __skb_header_release in our skbs to allow skb_cow_head
	 * to work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call, thus allowing other skbs with the same data buffer
	 * to write freely in that area.
	 */
	result = skb_cow_head(skb, len);
	if (result < 0)
		return result;

	skb_push(skb, len);
	return 0;
}
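/* Usage sketch (illustrative, mirroring batadv_interface_tx() further down
 * rather than documenting a separate API): a caller that wants to prepend a
 * batman-adv header does roughly
 *
 *	if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 *		goto dropped;
 *	bcast_packet = (struct batadv_bcast_packet *)skb->data;
 *
 * i.e. reserve the header space first, then cast skb->data to the packet
 * struct and fill in its fields.
 */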

static int batadv_interface_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int batadv_interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/**
 * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
 * @bat_priv: the bat priv with all the soft interface information
 * @idx: index of counter to sum up
 *
 * Return: sum of all cpu-local counters
 */
static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
	u64 *counters, sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}

	return sum;
}
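/* Descriptive note: bat_priv->bat_counters is allocated per-cpu with
 * __alloc_percpu() in batadv_softif_init_late() and updated on the local
 * CPU via batadv_inc_counter()/batadv_add_counter(), so reading a counter
 * means summing over all possible CPUs as done above. The result is a
 * snapshot and may already be slightly stale when returned, which is good
 * enough for statistics.
 */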

static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
	return stats;
}

static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(old_addr, dev->dev_addr);
	eth_hw_addr_set(dev, addr->sa_data);

	/* only modify transtable if it has been initialized before */
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		return 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
				       "mac address changed", false);
		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
	}
	rcu_read_unlock();

	return 0;
}
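/* Descriptive note: the loop above touches the translation table once per
 * configured VLAN because every softif_vlan owns its own local TT entry
 * for the interface MAC (added in batadv_softif_create_vlan() and
 * batadv_interface_add_vid()). If the mesh is not active yet there is
 * nothing to migrate, so only the new address is stored.
 */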

static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
	/* check ranges */
	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}
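/* Descriptive note: 68 bytes is the classic IPv4 minimum MTU, while the
 * upper bound depends on the hard interfaces currently attached, which is
 * why batadv_hardif_min_mtu() is consulted instead of a fixed constant.
 */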

/**
 * batadv_interface_set_rx_mode() - set the rx mode of a device
 * @dev: registered network device to modify
 *
 * We do not actually need to set any rx filters for the virtual batman
 * soft interface. However a dummy handler enables a user to set static
 * multicast listeners for instance.
 */
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
}

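/* Rough overview of the transmit path implemented below (descriptive
 * comment only, the authoritative behaviour is the code itself):
 *
 * 1. sanity checks, VLAN parsing and batman-in-batman loop prevention
 * 2. bridge loop avoidance check and registration of the source MAC in
 *    the local translation table
 * 3. classification of multicast and DHCP traffic depending on the
 *    gateway mode and the multicast optimizations
 * 4. transmission either as a broadcast via batadv_send_bcast_packet()
 *    or as one or more unicasts (gateway, multicast or translation table
 *    helpers)
 */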
static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
				       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
					      0x00, 0x00};
	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
					       0x00, 0x00};
	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
	struct vlan_ethhdr *vhdr;
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	unsigned long brd_delay = 0;
	bool do_bcast = false, client_added;
	unsigned short vid;
	u32 seqno;
	int gw_mode;
	enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
	struct batadv_orig_node *mcast_single_orig = NULL;
	int mcast_is_routable = 0;
	int network_offset = ETH_HLEN;
	__be16 proto;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	netif_trans_update(soft_iface);
	vid = batadv_get_vid(skb, 0);

	skb_reset_mac_header(skb);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;

	switch (ntohs(proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, sizeof(*vhdr)))
			goto dropped;
		vhdr = vlan_eth_hdr(skb);
		proto = vhdr->h_vlan_encapsulated_proto;

		/* drop batman-in-batman packets to prevent loops */
		if (proto != htons(ETH_P_BATMAN)) {
			network_offset += VLAN_HLEN;
			break;
		}

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	skb_set_network_header(skb, network_offset);

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* skb->data might have been reallocated by batadv_bla_tx() */
	ethhdr = eth_hdr(skb);

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source) &&
	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
						   vid, skb->skb_iif,
						   skb->mark);
		if (!client_added)
			goto dropped;
	}

	/* Snoop address candidates from DHCPACKs for early DAT filling */
	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	gw_mode = atomic_read(&bat_priv->gw.mode);
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* if gw mode is off, broadcast every packet */
		if (gw_mode == BATADV_GW_MODE_OFF) {
			do_bcast = true;
			goto send;
		}

		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
							chaddr);
		/* skb->data may have been modified by
		 * batadv_gw_dhcp_recipient_get()
		 */
		ethhdr = eth_hdr(skb);
		/* if gw_mode is on, broadcast any non-DHCP message.
		 * All the DHCP packets are going to be sent as unicast
		 */
		if (dhcp_rcp == BATADV_DHCP_NO) {
			do_bcast = true;
			goto send;
		}

		if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
			dst_hint = chaddr;
		else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
			 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
			/* gateways should not forward any DHCP message if
			 * directed to a DHCP server
			 */
			goto dropped;

send:
		if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
							   &mcast_single_orig,
							   &mcast_is_routable);
			if (forw_mode == BATADV_FORW_NONE)
				goto dropped;

			if (forw_mode == BATADV_FORW_SINGLE ||
			    forw_mode == BATADV_FORW_SOME)
				do_bcast = false;
		}
	}

	batadv_skb_set_priority(skb, 0);

	/* ethernet packet should be broadcasted */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of ARP request, we do not immediately broadcast the
		 * packet, instead we first wait for DAT to try to retrieve the
		 * correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->version = BATADV_COMPAT_VERSION;
		bcast_packet->ttl = BATADV_TTL - 1;

		/* batman packet type: broadcast */
		bcast_packet->packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		ether_addr_copy(bcast_packet->orig,
				primary_if->net_dev->dev_addr);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
	/* unicast packet */
	} else {
		/* DHCP packets going to a server will use the GW feature */
		if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
			ret = batadv_gw_out_of_range(bat_priv, skb);
			if (ret)
				goto dropped;
			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
		} else if (mcast_single_orig) {
			ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
							  mcast_single_orig);
		} else if (forw_mode == BATADV_FORW_SOME) {
			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
						     mcast_is_routable);
		} else {
			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
								  skb))
				goto dropped;

			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
						     vid);
		}
		if (ret != NET_XMIT_SUCCESS)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	batadv_orig_node_put(mcast_single_orig);
	batadv_hardif_put(primary_if);
	return NETDEV_TX_OK;
}

/**
 * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
 * @soft_iface: local interface which will receive the ethernet frame
 * @skb: ethernet frame for @soft_iface
 * @hdr_size: size of already parsed batman-adv header
 * @orig_node: originator from which the batman-adv packet was sent
 *
 * Sends an ethernet frame to the receive path of the local @soft_iface.
 * skb->data still has to point to the batman-adv header with the size
 * @hdr_size. The caller has to have parsed this header already and made
 * sure that at least @hdr_size bytes are still available for pull in @skb.
 *
 * The packet may still get dropped. This can happen when the encapsulated
 * ethernet frame is invalid or again contains a batman-adv packet. Unicast
 * packets are also dropped directly when they were sent between two
 * isolated clients.
 */
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb, int hdr_size,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_bcast_packet *batadv_bcast_packet;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct vlan_ethhdr *vhdr;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int packet_type;

	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
	packet_type = batadv_bcast_packet->packet_type;

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	/* clean the netfilter state now that the batman-adv header has been
	 * removed
	 */
	nf_reset_ct(skb);

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
			goto dropped;

		vhdr = (struct vlan_ethhdr *)skb->data;

		/* drop batman-in-batman packets to prevent loops */
		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
			break;

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	skb->protocol = eth_type_trans(skb, soft_iface);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	/* Let the bridge loop avoidance check the packet. If it does not
	 * handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source, vid);

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* set the mark on broadcast packets if AP isolation is ON and
		 * the packet is coming from an "isolated" client
		 */
		if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
		    batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
						 vid)) {
			/* save bits in skb->mark not covered by the mask and
			 * apply the mark on the rest
			 */
			skb->mark &= ~bat_priv->isolation_mark_mask;
			skb->mark |= bat_priv->isolation_mark;
		}
	} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
					 ethhdr->h_dest, vid)) {
		goto dropped;
	}

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}

/**
 * batadv_softif_vlan_release() - release vlan from lists and queue for free
 *  after rcu grace period
 * @ref: kref pointer of the vlan object
 */
void batadv_softif_vlan_release(struct kref *ref)
{
	struct batadv_softif_vlan *vlan;

	vlan = container_of(ref, struct batadv_softif_vlan, refcount);

	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
	hlist_del_rcu(&vlan->list);
	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);

	kfree_rcu(vlan, rcu);
}
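/* Descriptive note on the refcounting scheme: the list unlink happens in
 * the kref release callback above, so the final batadv_softif_vlan_put()
 * both removes the vlan from softif_vlan_list and schedules the actual
 * free via kfree_rcu(). Readers that obtained the pointer under
 * rcu_read_lock() can keep using it until the grace period ends.
 */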

/**
 * batadv_softif_vlan_get() - get the vlan object for a specific vid
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the identifier of the vlan object to retrieve
 *
 * Return: the private data of the vlan matching the vid passed as argument or
 * NULL otherwise. The refcounter of the returned object is incremented by 1.
 */
struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
						  unsigned short vid)
{
	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
		if (vlan_tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&vlan_tmp->refcount))
			continue;

		vlan = vlan_tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}
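/* Illustrative get/put pairing for callers (not part of the original
 * control flow in this file):
 *
 *	vlan = batadv_softif_vlan_get(bat_priv, vid);
 *	if (vlan) {
 *		... use vlan ...
 *		batadv_softif_vlan_put(vlan);
 *	}
 *
 * kref_get_unless_zero() above ensures that a vlan which is concurrently
 * being released (refcount already zero) is skipped instead of revived.
 */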

/**
 * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier
 *
 * Return: 0 on success, a negative error otherwise.
 */
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;

	spin_lock_bh(&bat_priv->softif_vlan_list_lock);

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		batadv_softif_vlan_put(vlan);
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -EEXIST;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan) {
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -ENOMEM;
	}

	vlan->bat_priv = bat_priv;
	vlan->vid = vid;
	kref_init(&vlan->refcount);

	atomic_set(&vlan->ap_isolation, 0);

	kref_get(&vlan->refcount);
	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	/* don't return reference to new softif_vlan */
	batadv_softif_vlan_put(vlan);

	return 0;
}
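/* Descriptive note on reference ownership: kref_init() starts the vlan
 * with one reference for the creator, kref_get() takes a second one for
 * softif_vlan_list, and the batadv_softif_vlan_put() at the end drops the
 * creator reference again, leaving the list as the only long-term owner.
 */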

/**
 * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
 * @bat_priv: the bat priv with all the soft interface information
 * @vlan: the object to remove
 */
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
				       struct batadv_softif_vlan *vlan)
{
	/* explicitly remove the associated TT local entry because it is marked
	 * with the NOPURGE flag
	 */
	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
			       vlan->vid, "vlan interface destroyed", false);

	batadv_softif_vlan_put(vlan);
}

/**
 * batadv_interface_add_vid() - ndo_add_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the new vlan
 *
 * Set up all the internal structures for handling the new vlan on top of the
 * mesh interface
 *
 * Return: 0 on success or a negative error code in case of failure.
 */
static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
				    unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported.
	 * batman-adv does not know how to handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vid |= BATADV_VLAN_HAS_TAG;

	/* if a new vlan is getting created and it already exists, it means that
	 * it was not deleted yet. batadv_softif_vlan_get() increases the
	 * refcount in order to revive the object.
	 *
	 * if it does not exist then create it.
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return batadv_softif_create_vlan(bat_priv, vid);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag. This must be added again, even if the vlan object already
	 * exists, because the entry was deleted by kill_vid()
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	return 0;
}
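/* Descriptive note: BATADV_VLAN_HAS_TAG marks the vid as a real 802.1Q
 * tag so that tagged and untagged traffic with the same numeric id can be
 * told apart internally. An already existing vlan object is reused here
 * (only its TT entry is re-added) because batadv_interface_kill_vid() may
 * have removed that entry while the object itself was still referenced.
 */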

/**
 * batadv_interface_kill_vid() - ndo_kill_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the deleted vlan
 *
 * Destroy all the internal structures used to handle the vlan identified by vid
 * on top of the mesh interface
 *
 * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
 * or -ENOENT if the specified vlan id wasn't registered.
 */
static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
				     unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported. batman-adv does not know how to
	 * handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
	if (!vlan)
		return -ENOENT;

	batadv_softif_destroy_vlan(bat_priv, vlan);

	/* finally free the vlan object */
	batadv_softif_vlan_put(vlan);

	return 0;
}

/* batman-adv network devices have devices nesting below them and are a
 * special "super class" of normal network devices; split their locks off
 * into a separate class since they always nest.
 */
static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;

/**
 * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
 * @dev: device which owns the tx queue
 * @txq: tx queue to modify
 * @_unused: always NULL
 */
static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}

/**
 * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
 * @dev: network device to modify
 */
static void batadv_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}

/**
 * batadv_softif_init_late() - late stage initialization of soft interface
 * @dev: registered network device to modify
 *
 * Return: error code on failures
 */
static int batadv_softif_init_late(struct net_device *dev)
{
	struct batadv_priv *bat_priv;
	u32 random_seqno;
	int ret;
	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;

	batadv_set_lockdep_class(dev);

	bat_priv = netdev_priv(dev);
	bat_priv->soft_iface = dev;

	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
	 */
	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
	if (!bat_priv->bat_counters)
		return -ENOMEM;

	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_MCAST
	atomic_set(&bat_priv->multicast_mode, 1);
	atomic_set(&bat_priv->multicast_fanout, 16);
	atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw.bandwidth_down, 100);
	atomic_set(&bat_priv->gw.bandwidth_up, 20);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
	atomic_set(&bat_priv->bcast_seqno, 1);
	atomic_set(&bat_priv->tt.vn, 0);
	atomic_set(&bat_priv->tt.local_changes, 0);
	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bla.num_requests, 0);
#endif
	atomic_set(&bat_priv->tp_num, 0);

	bat_priv->tt.last_changeset = NULL;
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->isolation_mark = 0;
	bat_priv->isolation_mark_mask = 0;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&bat_priv->frag_seqno, random_seqno);

	bat_priv->primary_if = NULL;

	batadv_nc_init_bat_priv(bat_priv);

	if (!bat_priv->algo_ops) {
		ret = batadv_algo_select(bat_priv, batadv_routing_algo);
		if (ret < 0)
			goto free_bat_counters;
	}

	ret = batadv_mesh_init(dev);
	if (ret < 0)
		goto free_bat_counters;

	return 0;

free_bat_counters:
	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	return ret;
}

/**
 * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
 * @dev: batadv_soft_interface used as master interface
 * @slave_dev: net_device which should become the slave interface
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_slave_add(struct net_device *dev,
				   struct net_device *slave_dev,
				   struct netlink_ext_ack *extack)
{
	struct batadv_hard_iface *hard_iface;
	int ret = -EINVAL;

	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
	if (!hard_iface || hard_iface->soft_iface)
		goto out;

	ret = batadv_hardif_enable_interface(hard_iface, dev);

out:
	batadv_hardif_put(hard_iface);
	return ret;
}
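/* Illustrative user space trigger (assumed iproute2 invocation, not taken
 * from this file): "ip link set dev eth0 master bat0" makes the rtnetlink
 * core resolve IFLA_MASTER to ndo_add_slave, which is wired to this
 * function in batadv_netdev_ops below.
 */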

/**
 * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
 * @dev: batadv_soft_interface used as master interface
 * @slave_dev: net_device which should be removed from the master interface
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_slave_del(struct net_device *dev,
				   struct net_device *slave_dev)
{
	struct batadv_hard_iface *hard_iface;
	int ret = -EINVAL;

	hard_iface = batadv_hardif_get_by_netdev(slave_dev);

	if (!hard_iface || hard_iface->soft_iface != dev)
		goto out;

	batadv_hardif_disable_interface(hard_iface);
	ret = 0;

out:
	batadv_hardif_put(hard_iface);
	return ret;
}

static const struct net_device_ops batadv_netdev_ops = {
	.ndo_init = batadv_softif_init_late,
	.ndo_open = batadv_interface_open,
	.ndo_stop = batadv_interface_release,
	.ndo_get_stats = batadv_interface_stats,
	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
	.ndo_set_mac_address = batadv_interface_set_mac_addr,
	.ndo_change_mtu = batadv_interface_change_mtu,
	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
	.ndo_start_xmit = batadv_interface_tx,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_add_slave = batadv_softif_slave_add,
	.ndo_del_slave = batadv_softif_slave_del,
};

static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
	strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "batman", sizeof(info->bus_info));
}

/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 * Declare each description string in struct.name[] to get a fixed-size
 * buffer and compile time checking for strings longer than ETH_GSTRING_LEN.
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
	{ "tx" },
	{ "tx_bytes" },
	{ "tx_dropped" },
	{ "rx" },
	{ "rx_bytes" },
	{ "forward" },
	{ "forward_bytes" },
	{ "mgmt_tx" },
	{ "mgmt_tx_bytes" },
	{ "mgmt_rx" },
	{ "mgmt_rx_bytes" },
	{ "frag_tx" },
	{ "frag_tx_bytes" },
	{ "frag_rx" },
	{ "frag_rx_bytes" },
	{ "frag_fwd" },
	{ "frag_fwd_bytes" },
	{ "tt_request_tx" },
	{ "tt_request_rx" },
	{ "tt_response_tx" },
	{ "tt_response_rx" },
	{ "tt_roam_adv_tx" },
	{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
	{ "dat_get_tx" },
	{ "dat_get_rx" },
	{ "dat_put_tx" },
	{ "dat_put_rx" },
	{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
	{ "nc_code" },
	{ "nc_code_bytes" },
	{ "nc_recode" },
	{ "nc_recode_bytes" },
	{ "nc_buffer" },
	{ "nc_decode" },
	{ "nc_decode_bytes" },
	{ "nc_decode_failed" },
	{ "nc_sniffed" },
#endif
};
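/* Descriptive note: the order of the strings above has to match the order
 * of enum batadv_counters because batadv_get_ethtool_stats() below exports
 * the counters purely by index; adding a counter therefore means updating
 * both places.
 */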

static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, batadv_counters_strings,
		       sizeof(batadv_counters_strings));
}

static void batadv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	int i;

	for (i = 0; i < BATADV_CNT_NUM; i++)
		data[i] = batadv_sum_counter(bat_priv, i);
}

static int batadv_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return BATADV_CNT_NUM;

	return -EOPNOTSUPP;
}

static const struct ethtool_ops batadv_ethtool_ops = {
	.get_drvinfo = batadv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = batadv_get_strings,
	.get_ethtool_stats = batadv_get_ethtool_stats,
	.get_sset_count = batadv_get_sset_count,
};

/**
 * batadv_softif_free() - Deconstructor of batadv_soft_interface
 * @dev: Device to cleanup and remove
 */
static void batadv_softif_free(struct net_device *dev)
{
	batadv_mesh_free(dev);

	/* some scheduled RCU callbacks need the bat_priv struct to accomplish
	 * their tasks. Wait for them all to be finished before freeing the
	 * netdev and its private data (bat_priv)
	 */
	rcu_barrier();
}

/**
 * batadv_softif_init_early() - early stage initialization of soft interface
 * @dev: registered network device to modify
 */
static void batadv_softif_init_early(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &batadv_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = batadv_softif_free;
	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
	dev->features |= NETIF_F_LLTX;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* can't call min_mtu, because the needed variables
	 * have not been initialized yet
	 */
	dev->mtu = ETH_DATA_LEN;

	/* generate random address */
	eth_hw_addr_random(dev);

	dev->ethtool_ops = &batadv_ethtool_ops;
}
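/* Design note (descriptive summary, not normative): IFF_NO_QUEUE skips
 * the qdisc layer because batman-adv queues packets on the hard
 * interfaces itself, and NETIF_F_LLTX declares that this virtual device
 * handles its own transmit serialization. The random MAC set here is only
 * a default and can be replaced later via batadv_interface_set_mac_addr().
 */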

/**
 * batadv_softif_validate() - validate configuration of new batadv link
 * @tb: IFLA_INFO_DATA netlink attributes
 * @data: enum batadv_ifla_attrs attributes
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	struct batadv_algo_ops *algo_ops;

	if (!data)
		return 0;

	if (data[IFLA_BATADV_ALGO_NAME]) {
		algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
		if (!algo_ops)
			return -EINVAL;
	}

	return 0;
}

/**
 * batadv_softif_newlink() - pre-initialize and register new batadv link
 * @src_net: the applicable net namespace
 * @dev: network device to register
 * @tb: IFLA_INFO_DATA netlink attributes
 * @data: enum batadv_ifla_attrs attributes
 * @extack: extended ACK report struct
 *
 * Return: 0 if successful or error otherwise.
 */
static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	const char *algo_name;
	int err;

	if (data && data[IFLA_BATADV_ALGO_NAME]) {
		algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
		err = batadv_algo_select(bat_priv, algo_name);
		if (err)
			return -EINVAL;
	}

	return register_netdevice(dev);
}
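/* Illustrative user space trigger (assumed iproute2 syntax for the link
 * kind registered below): "ip link add name bat0 type batadv" ends up
 * here, and netlink-aware tools may additionally supply
 * IFLA_BATADV_ALGO_NAME to select the routing algorithm before
 * register_netdevice() runs.
 */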

/**
 * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
 *  netlink
 * @soft_iface: the to-be-removed batman-adv interface
 * @head: list pointer
 */
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
					  struct list_head *head)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *hard_iface;
	struct batadv_softif_vlan *vlan;

	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface == soft_iface)
			batadv_hardif_disable_interface(hard_iface);
	}

	/* destroy the "untagged" VLAN */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		batadv_softif_vlan_put(vlan);
	}

	unregister_netdevice_queue(soft_iface, head);
}

/**
 * batadv_softif_is_valid() - Check whether device is a batadv soft interface
 * @net_dev: device which should be checked
 *
 * Return: true when net_dev is a batman-adv interface, false otherwise
 */
bool batadv_softif_is_valid(const struct net_device *net_dev)
{
	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
		return true;

	return false;
}

static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
	[IFLA_BATADV_ALGO_NAME]	= { .type = NLA_NUL_STRING },
};

struct rtnl_link_ops batadv_link_ops __read_mostly = {
	.kind		= "batadv",
	.priv_size	= sizeof(struct batadv_priv),
	.setup		= batadv_softif_init_early,
	.maxtype	= IFLA_BATADV_MAX,
	.policy		= batadv_ifla_policy,
	.validate	= batadv_softif_validate,
	.newlink	= batadv_softif_newlink,
	.dellink	= batadv_softif_destroy_netlink,
};