xref: /openbmc/linux/net/batman-adv/send.c (revision 3f2fb9a834cb1fcddbae22deca7fde136944dc89)
/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
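
/* Illustrative usage sketch (not part of the original file; variable names
 * are hypothetical): callers only need to distinguish "handed to the driver
 * queue" from NET_XMIT_DROP. In both the error path above and the
 * dev_queue_xmit() drop case the skb has already been consumed, so it must
 * not be touched again afterwards:
 *
 *	ret = batadv_send_skb_packet(skb, hard_iface, dst_addr);
 *	if (ret == NET_XMIT_DROP)
 *		return NET_XMIT_DROP;
 */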

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}
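
/* Illustrative usage sketch (hypothetical caller, not taken from this file):
 * a typical user looks up the destination originator, lets this helper pick
 * the next hop (and handle fragmentation/network coding), and then drops its
 * own originator reference:
 *
 *	orig_node = batadv_orig_hash_find(bat_priv, dst_addr);
 *	if (!orig_node)
 *		return NET_XMIT_DROP;
 *
 *	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
 *	batadv_orig_node_put(orig_node);
 */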

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
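
/* Orientation only (a sketch, not normative - see packet.h for the
 * authoritative struct definition): after this helper the pushed header
 * starts with the common unicast fields filled in above, roughly
 *
 *	packet_type | version | ttl | ttvn | dest[ETH_ALEN]
 *
 * which is why batadv_send_skb_prepare_unicast_4addr() below can reuse it
 * for the larger 4addr header whose first member is a plain unicast header.
 */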

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}
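
/* Illustrative usage sketch (hypothetical caller, names are assumptions): a
 * 4addr user would prepare the header with its subtype and then hand the skb
 * to batadv_send_skb_to_orig():
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
 *						   BATADV_P_DATA))
 *		goto err;
 *
 *	batadv_send_skb_to_orig(skb, orig_node, NULL);
 */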

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
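
/* Explanatory note (added, not from the original file): decrementing the
 * ttvn above marks the header as stale on purpose. The destination compares
 * it against its current translation table version, notices the mismatch and
 * runs its re-routing logic, which is exactly what is wanted while the
 * roaming client's new location is not yet known here.
 */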

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
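
/* Illustrative sketch (assumption about the wrappers, see send.h): the thin
 * helpers batadv_send_skb_via_tt() and batadv_send_skb_via_tt_4addr() are
 * expected to be little more than
 *
 *	batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
 *				       dst_hint, vid);
 *
 * i.e. they merely fix packet_type/packet_subtype for the common cases.
 */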

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}
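
/* Illustrative usage sketch (hypothetical caller, not from this file): since
 * the function above works on a copy, the soft-interface TX path can queue
 * its own broadcast and then release the original skb itself:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, delay) !=
 *	    NETDEV_TX_OK)
 *		goto dropped;
 *
 *	consume_skb(skb);
 */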

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
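
/* Explanatory note (added): each pass of the work item above sends at most
 * one copy per hard interface, then re-queues itself with a 5 ms delay until
 * BATADV_NUM_BCASTS_MAX passes have been made; interfaces whose own
 * num_bcasts limit is already reached are simply skipped in later passes.
 */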

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
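
/* Illustrative usage sketch (hypothetical callers, not part of this file):
 * an interface teardown path would typically flush everything still queued
 * for that interface,
 *
 *	batadv_purge_outstanding_packets(bat_priv, hard_iface);
 *
 * while passing NULL instead of hard_iface purges the queues of the whole
 * mesh interface, e.g. on shutdown.
 */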