xref: /openbmc/linux/net/batman-adv/send.c (revision 1ccd4b7b)
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / TQ_MAX_VALUE;
}
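
/*
 * Worked example (constants assumed from main.h of this era:
 * TQ_MAX_VALUE = 255, default hop_penalty = 10): an incoming tq of 200
 * becomes 200 * (255 - 10) / 255 = 192, i.e. every hop scales the path
 * quality down by roughly hop_penalty / TQ_MAX_VALUE.
 */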

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
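
/*
 * Example (assuming JITTER is 20 ms as defined in main.h and
 * orig_interval sits at its usual 1000 ms default): the next own OGM
 * is scheduled 980..1019 ms from now, i.e. orig_interval +/- JITTER.
 */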

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER / 2));
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
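/*
 * The skb handed over in forw_packet may carry several aggregated OGMs;
 * the buffer layout (as assumed here, produced by the aggregation code)
 * is:
 *
 *	[batman_packet][tt changes][batman_packet][tt changes]...
 *
 * The loop below walks this buffer one OGM at a time, adjusting the
 * DIRECTLINK flag and logging each packet before the whole buffer is
 * cloned and sent.
 */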
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		/* aggregated packets after the first one are always
		 * forwarded */
		fwd_str = (packet_num == 0 && forw_packet->own ?
			   "Sending own" : "Forwarding");
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += sizeof(*batman_packet) +
			tt_len(batman_packet->tt_num_changes);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
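/*
 * An OGM is sent one of two ways: if it must not leave the interface it
 * was scheduled on (direct link packets with TTL 1, or own OGMs of a
 * non-primary interface - the multihomed peer case), it is broadcast on
 * that single interface only; otherwise it is handed to
 * send_packet_to_if() for every hard interface attached to the same
 * soft interface.
 */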
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed - non-primary OGMs are only
	 * broadcast on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* the skb is used only once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
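
/*
 * Resize the OGM buffer of the given interface to new_len bytes. Only
 * the base batman_packet header is copied over; any appended tt changes
 * have to be rewritten by the caller if still wanted. On allocation
 * failure the old buffer is kept untouched.
 */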
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       sizeof(*batman_packet));

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* when calling this function, (hard_iface == primary_if) has to be true */
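/*
 * Build the next own OGM into the (possibly resized) packet buffer:
 * the base packet is followed by the pending local tt changes if they
 * all fit into the MTU; otherwise none are appended and the full table
 * is left to be fetched via a (fragmented) tt request instead. The
 * sending counter is reset so the changes are repeated in the next
 * TT_OGM_APPEND_MAX own OGMs.
 */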
static void prepare_packet_buffer(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	struct batman_packet *batman_packet;

	new_len = BAT_PACKET_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BAT_PACKET_LEN;

	realloc_packet_buffer(hard_iface, new_len);
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
				hard_iface->packet_buff + BAT_PACKET_LEN,
				hard_iface->packet_len - BAT_PACKET_LEN);
}
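
/*
 * Drop any tt changes from the OGM buffer: shrink it back to the bare
 * batman_packet and mark it as carrying zero tt changes.
 */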
static void reset_packet_buffer(struct bat_priv *bat_priv,
				struct hard_iface *hard_iface)
{
	struct batman_packet *batman_packet;

	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);

	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
	batman_packet->tt_num_changes = 0;
}
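
/*
 * Prepare the next own OGM for hard_iface (sequence number, ttvn,
 * tt crc, vis and gateway flags) and queue it with a jittered send
 * time. On the primary interface, pending tt changes are committed and
 * appended to the buffer first.
 */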
void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/*
	 * the interface gets activated here to avoid race conditions between
	 * the moment hardif_activate_interface() sets the originator mac and
	 * outdated packets (especially ones with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			prepare_packet_buffer(bat_priv, hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			reset_packet_buffer(bat_priv, hard_iface);
	}

	/*
	 * NOTE: packet_buff might just have been re-allocated in
	 * prepare_packet_buffer() or in reset_packet_buffer()
	 */
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}
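
/*
 * Queue a received OGM for rebroadcast: decrement the TTL, take over
 * the tq of our best ranking neighbor towards the originator, apply
 * the hop penalty and schedule the packet with a short random delay.
 */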
void schedule_forward_packet(struct orig_node *orig_node,
			     const struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     int directlink,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;
	uint8_t tt_num_changes;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;
	tt_num_changes = batman_packet->tt_num_changes;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast the tq of our best ranking neighbor to ensure the
	 * rebroadcast carries our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);
	batman_packet->tt_crc = htons(batman_packet->tt_crc);

	/* switch off the primaries first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(*batman_packet) + tt_len(tt_num_changes),
			       if_incoming, 0, send_time);
}
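
/* free a forw_packet together with its skb and interface reference */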
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
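
/*
 * Add the forw_packet to the broadcast queue and arm its delayed work;
 * send_outstanding_bcast_packet() fires once send_time (in jiffies)
 * has elapsed. Takes forw_bcast_list_lock, so the caller must not
 * hold it.
 */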
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up its timers. broadcast
 * packets are sent multiple times to increase the probability of being
 * received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
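
/*
 * Delayed-work handler for queued broadcasts: take the packet off the
 * queue, clone it out on every hard interface belonging to the soft
 * interface, and re-queue it with a roughly 5 ms delay until it has
 * been sent three times.
 */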
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
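
/*
 * Delayed-work handler for queued OGMs: dequeue and transmit the
 * packet; for own OGMs, immediately schedule the next one so the
 * queue always holds a packet that determines the next wake-up time.
 */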
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/*
	 * unless we are shutting down, there has to be at least one
	 * packet in the queue to determine the queue's wake-up time
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packets */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}
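
/*
 * Cancel all pending broadcast and OGM transmissions; if hard_iface is
 * given, only packets scheduled on that interface are purged. Each list
 * lock is dropped while the delayed work is cancelled, because
 * cancel_delayed_work_sync() may wait for a running handler that itself
 * takes the lock to unlink its packet.
 */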
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/*
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/*
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/*
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/*
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}