/* Copyright (C) 2013-2016  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "hard-interface.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->head);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;
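	/* As a rough worked example, assuming the usual defaults of a
	 * 1400-byte maximum fragment size, 16 fragments and a 20-byte
	 * fragment header, this evaluates to (1400 - 20) * 16 = 22080 bytes
	 * of mergeable payload.
	 */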

	return limit;
}

/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
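	/* All fragments of one original packet share the same seqno, so they
	 * map to the same chain. Fragments of a different packet that happen
	 * to hash to the same bucket are detected and flushed by
	 * batadv_frag_init_chain() below.
	 */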

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the chain is empty after initialization.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skb's into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);
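	/* The chain is ordered by decreasing fragment number and the sender
	 * cuts fragments from the tail of the original packet, so this first
	 * entry (the highest number) carries the head of the payload; the
	 * remaining fragments are appended behind it in order below.
	 */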

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Overwrite
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
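	/* The MAC header now sits ETH_HLEN bytes in front of skb->data, hence
	 * the negative offset above.
	 */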
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each remaining fragment into the merged skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}

/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and leave skb as is.
 *
 * Return: true when the packet is merged or buffered, false when the skb is
 * not used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	*skb = skb_out;
	ret = true;
out_err:
	return ret;
}

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards that next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}
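	/* Otherwise the fragment is left untouched and the caller is expected
	 * to buffer it for a later merge instead.
	 */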

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: a new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from the
 * tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last fragment_size bytes from the tail of skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);
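	/* The caller bumps frag_head->no between calls, so each fragment
	 * created here carries its own fragment number in the copied header.
	 */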

err:
	return skb_fragment;
}

/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or -1 in case of error.
 * When -1 is returned the skb is not consumed.
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	int ret = -1;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);
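	/* total_size is repeated in every fragment so that the receiving side
	 * can tell when the buffered fragments add up to the complete packet.
	 */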

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			/* return -1 so that the caller can free the original
			 * skb
			 */
			ret = -1;
			goto out;
		}

		frag_header.no++;

		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
			ret = -1;
			goto out;
		}
	}
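	/* What is left of skb at this point is the head of the original
	 * payload; it is sent below as the final fragment, carrying the
	 * highest fragment number.
	 */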

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	return ret;
}