/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. If dst_addr is the broadcast address, the packet
 * is broadcast via hard_iface, otherwise it is sent as unicast to that
 * neighbor address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
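
/**
 * batadv_send_broadcast_skb - send an skb as broadcast on the given interface
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Wrapper around batadv_send_skb_packet() which uses the broadcast address
 * as destination. The skb is consumed regardless of the return value.
 *
 * Return: see batadv_send_skb_packet().
 */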
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

/**
 * batadv_send_unicast_skb - send an skb as unicast to the given neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Wrapper around batadv_send_skb_packet() which uses the neighbor's address
 * and incoming interface. With B.A.T.M.A.N. V it additionally updates the
 * neighbor's last unicast TX timestamp. The skb is consumed regardless of
 * the return value.
 *
 * Return: see batadv_send_skb_packet().
 */
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases the neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: number of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}
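
/* For reference, the header nesting that the cast in
 * batadv_send_skb_prepare_unicast_4addr() relies on looks roughly like this
 * (abridged sketch of the definitions in packet.h, not a verbatim copy):
 *
 *	struct batadv_unicast_packet {
 *		u8 packet_type;
 *		u8 version;
 *		u8 ttl;
 *		u8 ttvn;
 *		u8 dest[ETH_ALEN];
 *	};
 *
 *	struct batadv_unicast_4addr_packet {
 *		struct batadv_unicast_packet u;
 *		u8 src[ETH_ALEN];
 *		u8 subtype;
 *		u8 reserved;
 *	};
 *
 * Because the unicast header is the first member, filling it via
 * batadv_send_skb_push_fill_unicast() initializes the common prefix of both
 * packet formats.
 */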

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	/* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);
	return ret;
}
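
/* Illustrative use (hypothetical caller, not part of this file): the soft
 * interface TX path would encapsulate a payload for a known originator with
 * something like
 *
 *	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
 *				      orig_node, vid);
 *
 * where the packet_subtype of 0 is ignored for plain BATADV_UNICAST packets.
 */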

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}
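
/* A forw_packet generally goes through the following life cycle within this
 * file: batadv_forw_packet_alloc() creates it and grabs the references it
 * needs, the caller attaches an skb and initializes delayed_work, the packet
 * is (re)queued via batadv_forw_packet_queue() and finally disposed of by
 * stealing it (batadv_forw_packet_steal() or
 * batadv_forw_packet_list_steal()) and handing it to
 * batadv_forw_packet_free().
 */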

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

/**
 * batadv_forw_packet_was_stolen - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}
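
/* Typical disposal pattern for a single forw_packet, as used at the end of
 * batadv_send_outstanding_bcast_packet() below:
 *
 *	if (batadv_forw_packet_steal(forw_packet, lock))
 *		batadv_forw_packet_free(forw_packet, dropped);
 *
 * If the steal fails, the purge routine has already claimed the packet and
 * will free it instead.
 */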

/**
 * batadv_forw_packet_list_steal - claim a list of forward packets for free()
 * @forw_list: the forward packets to be stolen
 * @cleanup_list: a backup pointer, to be able to dispose of the packets later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are moved from the forw_list to the cleanup_list, which
 * allows already running threads to notice the claiming.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}

/**
 * batadv_forw_packet_list_free - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees these forward packets.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}

/**
 * batadv_forw_packet_queue - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelf to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the corresponding interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->skb = newskb;
	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet, true);
err:
	return NETDEV_TX_BUSY;
}

/**
 * batadv_send_outstanding_bcast_packet - (re)send a queued broadcast packet
 * @work: work queue item
 *
 * Transmit one pending broadcast packet on all hard interfaces attached to
 * the packet's soft interface and requeue it until it was sent
 * BATADV_NUM_BCASTS_MAX times, unless the mesh is deactivating or DAT can
 * answer the request instead.
 */
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}