/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "fragmentation.h"
#include "send.h"
#include "originator.h"
#include "routing.h"
#include "hard-interface.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated with an orig_node
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	uint8_t i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&orig_node->fragments[i].lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&orig_node->fragments[i].head);
			orig_node->fragments[i].size = 0;
		}

		spin_unlock_bh(&orig_node->fragments[i].lock);
	}
}

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Returns the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
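/* Worked example for the limit above, assuming the usual constants of
 * BATADV_FRAG_MAX_FRAG_SIZE == 1400, BATADV_FRAG_MAX_FRAGMENTS == 16 and a
 * 20 byte struct batadv_frag_packet (illustrative values, not guaranteed
 * here):
 *
 *	limit = (1400 - 20) * 16 = 22080
 *
 * i.e. a chain of received fragments may carry at most ~22 KiB of payload;
 * the same bound is used to sanity-check the total_size field of incoming
 * fragments.
 */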
/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete
 * existing entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Returns true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   uint16_t seqno)
{
	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}
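/* Example of the bucket scheme used by the insert function below, assuming
 * BATADV_FRAG_BUFFER_COUNT == 8 (an illustrative value, not guaranteed
 * here): a fragment with seqno 4242 lands in bucket 4242 % 8 == 2. If a
 * fragment with seqno 4250 arrives later, it maps to the same bucket, and
 * batadv_frag_init_chain() above flushes the stale chain and restarts it
 * with the new seqno. Hence at most one partial packet is buffered per
 * bucket and originator at any time.
 */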
/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Returns true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	uint8_t bucket;
	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'.
	 * ('frag_entry_curr' is NULL here: hlist_for_each_entry() leaves its
	 * cursor NULL once the list is exhausted.)
	 */
	if (likely(frag_entry_last)) {
		hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 * @skb: fragment whose header holds the total size of the merged packet
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Returns the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Make sure incoming skb has non-bogus data. */
	packet = (struct batadv_frag_packet *)skb->data;
	size = ntohs(packet->total_size);
	if (size > batadv_frag_size_limit())
		goto free;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the merged skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}
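/* Buffer layout during the merge above, sketched for an assumed 20 byte
 * fragment header (sizes illustrative only):
 *
 *	before:	... | MAC (14) | frag header (20) | payload of first entry |
 *	after:	... | gap (20) | MAC (14) | payload | payloads of the rest |
 *
 * skb_pull_rcsum() drops the fragment header, memmove() shifts the MAC
 * header forward so it ends up directly in front of the payload, and the
 * skb_put()/memcpy() loop appends the remaining fragments' payloads at the
 * tail of the expanded skb.
 */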
/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and leave skb as is.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head, *skb);
	if (!skb_out)
		goto out_err;

out:
	*skb = skb_out;
	ret = true;
out_err:
	return ret;
}

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Returns true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	uint16_t total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->header.ttl--;
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_free_ref(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	return ret;
}

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from
 * the tail of the old skb.
 *
 * Returns the new fragment, or NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned header_size = sizeof(*frag_head);
	unsigned fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb->priority = TC_PRIO_CONTROL;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
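/* Note on fragment numbering for the sender below: batadv_frag_create()
 * splits fragments off the *tail* of the skb, so frag_header.no == 0
 * carries the last bytes of the packet and the unsplit remainder gets the
 * highest number. For a hypothetical 3000 byte skb with mtu == 1400 and a
 * 20 byte fragment header (illustrative values), the wire order is:
 *
 *	no 0: bytes 1620-2999	(first split of 1380 bytes)
 *	no 1: bytes  240-1619	(second split of 1380 bytes)
 *	no 2: bytes    0-239	(remainder, sent after the loop)
 *
 * The receive side keeps each chain ordered from highest to lowest number,
 * which is exactly head-to-tail order for batadv_frag_merge_packets().
 */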
/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Returns true on success, false otherwise.
 */
bool batadv_frag_send_packet(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned header_size = sizeof(frag_header);
	unsigned max_fragment_size, max_packet_size;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size - ETH_HLEN;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out_err;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_err;

	/* Create one header to be copied to all fragments */
	frag_header.header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.header.version = BATADV_COMPAT_VERSION;
	frag_header.header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);
	memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out_err;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
				       neigh_node->addr);
		frag_header.no++;

		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
			goto out_err;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out_err;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	return true;
out_err:
	return false;
}
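/* Illustrative receive-path usage of the buffering API above. This is a
 * simplified sketch of a caller (in this codebase the fragment receive
 * handler lives in routing.c); names and error handling are abbreviated,
 * not authoritative:
 *
 *	if (batadv_frag_skb_fwd(skb, recv_if, orig_node_src))
 *		return NET_RX_SUCCESS;	// too big to merge: forwarded as-is
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		return NET_RX_DROP;	// error: caller still owns skb
 *
 *	if (skb)			// all fragments arrived and merged:
 *		deliver(skb);		// hand the full packet up the stack
 *
 *	return NET_RX_SUCCESS;		// otherwise buffered; wait for more
 */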