/* Copyright (C) 2013-2016  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "hard-interface.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig - free fragments associated with an orig node
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->head);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}
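
/* A typical check_cb purges only chains whose buffered fragments have timed
 * out. A minimal sketch, modeled on the batadv_frag_check_entry() helper in
 * fragmentation.h:
 *
 *	static bool frag_timed_out(struct batadv_frag_table_entry *frags_entry)
 *	{
 *		return !hlist_empty(&frags_entry->head) &&
 *		       batadv_has_timed_out(frags_entry->timestamp,
 *					    BATADV_FRAG_TIMEOUT);
 *	}
 *
 * Passing check_cb == NULL purges every chain unconditionally.
 */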

/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
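
/* Worked example, assuming the usual values from packet.h and main.h (a
 * BATADV_FRAG_MAX_FRAG_SIZE of 1400 bytes, a 20 byte struct
 * batadv_frag_packet and BATADV_FRAG_MAX_FRAGMENTS == 16):
 *
 *	limit = (1400 - 20) * 16 = 22080 bytes
 *
 * Any payload larger than that can never fit into 16 fragments and must be
 * dropped.
 */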

/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->head))
		batadv_frag_clear_chain(&chain->head);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}
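
/* Example: if the chain currently holds fragments of seqno 41 and a fragment
 * with seqno 42 hashes to the same bucket, the stale entries are dropped and
 * the chain is reused for seqno 42 (return true). A further fragment of
 * seqno 42 leaves the chain untouched and returns false, so the caller
 * searches for the right insert position instead.
 */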

/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse-ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the chain is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->head);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->head);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->head, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}
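
/* Worked example, assuming BATADV_FRAG_BUFFER_COUNT == 8 (main.h): a
 * fragment with seqno 42 selects bucket 42 % 8 == 2. If its fragments
 * arrive in the order no = 2, 0, 1, the chain in that bucket evolves as
 *
 *	[2] -> [2, 0] -> [2, 1, 0]
 *
 * i.e. it stays sorted from highest to lowest fragment number, which is
 * exactly the order batadv_frag_merge_packets() expects: fragment 0 holds
 * the tail of the original packet, the highest number its head.
 */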

/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out = NULL;
	int size, hdr_size = sizeof(struct batadv_frag_packet);

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;

	/* skb_out still carries its fragment header at this point, so the
	 * buffer must hold total_size plus hdr_size bytes after expansion.
	 */
	size = ntohs(packet->total_size) + hdr_size;

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Overwrite
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each remaining fragment into the merged skb. */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	return skb_out;
}
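
/* Worked example: a chain holding payloads of 240, 1380 and 1380 bytes
 * (fragment numbers 2, 1, 0; total_size == 3000) is merged by expanding the
 * first entry (the 240 byte head of the original packet) and appending the
 * two 1380 byte payloads, handing one 3000 byte packet back to the caller.
 */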

/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: pointer to the skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and leave *skb as is.
 *
 * Return: true when the packet is merged or buffered, false when the skb is
 * not used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	*skb = skb_out;
	ret = true;
out_err:
	return ret;
}
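
/* Sketch of the expected caller pattern, loosely mirroring the fragment
 * receive path in routing.c (error handling trimmed, label name
 * illustrative):
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		goto free_skb;		// error: caller still owns skb
 *
 *	if (!skb)
 *		return NET_RX_SUCCESS;	// buffered: wait for more fragments
 *
 *	// skb now points to the fully merged packet
 */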

/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}
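
/* Example: a fragment announcing total_size == 1500 whose next-hop sits
 * behind an interface with a 1400 byte MTU is forwarded as-is, since the
 * merged packet could never be transmitted over that link anyway. With a
 * next-hop MTU of 1500 or more, false is returned and the caller buffers
 * the fragment for merging instead.
 */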

/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from the
 * tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb->priority = TC_PRIO_CONTROL;

	/* Eat the last fragment_size bytes of skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
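
/* Example, assuming an mtu of 1400 and the 20 byte fragment header: called
 * on a 3000 byte skb, skb_split() moves the last 1380 bytes into the new
 * skb, the header is pushed in front of them, and 1620 bytes remain in the
 * original skb for further rounds.
 */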

/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: true on success, false otherwise.
 */
bool batadv_frag_send_packet(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	bool ret = false;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE.
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out_err;

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_err;

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);
	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case,
		 * but check before every fragment: creating fragment number
		 * 15 while more data remains would require a 17th fragment,
		 * which cannot be numbered.
		 */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
			goto out_err;

		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out_err;

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		batadv_send_unicast_skb(skb_fragment, neigh_node);
		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out_err;

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	batadv_send_unicast_skb(skb, neigh_node);

	ret = true;

out_err:
	if (primary_if)
		batadv_hardif_put(primary_if);

	return ret;
}
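
/* Worked example, assuming a next-hop MTU of at least
 * BATADV_FRAG_MAX_FRAG_SIZE (1400 bytes): a 3000 byte skb is cut into three
 * fragments. Fragments no 0 and no 1 carry 1380 payload bytes each, eaten
 * from the tail, and the remaining 240 byte head is sent last as fragment
 * no 2. The receiver's highest-to-lowest chain ordering then puts the head
 * back in front before merging.
 */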