/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "fragmentation.h"
#include "send.h"
#include "originator.h"
#include "routing.h"
#include "hard-interface.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated with an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	uint8_t i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&orig_node->fragments[i].lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&orig_node->fragments[i].head);
			orig_node->fragments[i].size = 0;
		}

		spin_unlock_bh(&orig_node->fragments[i].lock);
	}
}

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Returns the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
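
/* Illustrative sizing, assuming the defaults defined in packet.h
 * (BATADV_FRAG_MAX_FRAG_SIZE = 1400, BATADV_FRAG_MAX_FRAGMENTS = 16) and a
 * 20 byte struct batadv_frag_packet: the limit above works out to
 * (1400 - 20) * 16 = 22080 bytes of merged payload.
 */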

/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete
 * existing entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Returns true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   uint16_t seqno)
{
	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}
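
/* Note on the chain table, derived from the surrounding code: each
 * originator keeps BATADV_FRAG_BUFFER_COUNT chains, indexed by
 * seqno % BATADV_FRAG_BUFFER_COUNT, so a chain holds fragments of at most
 * one packet at a time. As soon as a fragment with a different sequence
 * number hashes to the same bucket, batadv_frag_init_chain() flushes the
 * old entries and restarts the chain for the new sequence number.
 */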

/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Returns true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	uint8_t bucket;
	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list,
				 &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 * @skb: packet whose fragment header carries the total size after merging
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Returns the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Make sure incoming skb has non-bogus data. */
	packet = (struct batadv_frag_packet *)skb->data;
	size = ntohs(packet->total_size);
	if (size > batadv_frag_size_limit())
		goto free;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the last skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}
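
/* Why the reverse ordering works (illustrative summary, derived from the
 * sender below): fragments are cut from the tail of the original packet, so
 * fragment number 0 carries the last bytes of the payload and the highest
 * numbered fragment carries the first bytes. With the chain sorted from
 * highest to lowest fragment number, the first chain entry is the head of
 * the payload and walking the remaining entries appends the payload pieces
 * in their original order.
 */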

/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and leave skb as is.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head, *skb);
	if (!skb_out)
		goto out_err;

out:
	*skb = skb_out;
	ret = true;
out_err:
	return ret;
}
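
/* Usage sketch (hypothetical caller, for illustration only): a receive
 * handler for fragment packets could drive the buffering like this,
 * assuming it still owns the skb when false is returned:
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		goto free_skb;		(error: caller frees the skb)
 *	if (!skb)
 *		return NET_RX_SUCCESS;	(buffered: wait for more fragments)
 *	(skb now points to the fully merged packet)
 */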

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Returns true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	uint16_t total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_free_ref(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	return ret;
}

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from
 * the tail of the old skb.
 *
 * Returns the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb->priority = TC_PRIO_CONTROL;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
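
/* Note, derived from the code above: skb_split() is called with the offset
 * skb->len - fragment_size, so the new fragment receives the last
 * fragment_size bytes of the passed skb while the original skb shrinks by
 * the same amount. The skb_reserve() keeps headroom for the fragment header
 * pushed here and for the Ethernet header added later on transmit.
 */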

/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Returns true on success, false otherwise.
 */
bool batadv_frag_send_packet(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	bool ret = false;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out_err;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_err;

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);
	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out_err;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
				       neigh_node->addr);
		frag_header.no++;

		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
			goto out_err;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out_err;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	ret = true;

out_err:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	return ret;
}
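
/* Worked example (illustrative, assuming a 1400 byte fragment MTU and a
 * 20 byte fragment header): max_fragment_size is 1380, so a 3000 byte skb
 * goes out as fragment no 0 (the last 1380 bytes), fragment no 1 (the next
 * 1380 bytes) and finally the remaining 240 bytes as fragment no 2, sent as
 * the original skb with the header pushed in front. Every fragment header
 * carries total_size = 3000, which the receiver uses to detect completion.
 */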