/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

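/*
 * A worked example of the sizing math (illustrative only): the initial
 * table has 2^INIT_PATHS_SIZE_ORDER == 4 buckets, and the grow check in
 * mesh_path_add() fires once entries >= MEAN_CHAIN_LEN * 4 == 8, i.e.
 * the table doubles whenever the mean hash chain would exceed two nodes.
 */
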
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires that we take the
 * read lock or we risk operating on an old table.  The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

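/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * dereference the table once into a local variable inside an RCU read
 * section, then hand that variable to the macro, as the walkers below
 * (e.g. mesh_plink_broken()) do:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		use(node->mpath);	// use() is a hypothetical helper
 *	rcu_read_unlock();
 */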

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
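
/*
 * A worked sketch (illustrative only): for addr = 00:11:22:33:44:55,
 * *(u32 *)(addr + 2) reads the last four bytes 22:33:44:55 (as
 * 0x55443322 on little-endian hosts), so the bucket index is
 * jhash_2words(0x55443322, ifindex, hash_rnd) & hash_mask, e.g.
 * & 0x3 for the initial 2^INIT_PATHS_SIZE_ORDER == 4 buckets.
 */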

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
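
/*
 * A minimal caller sketch (illustrative only): the returned pointer is
 * only valid inside the RCU read section, so all use of the mpath must
 * stay within it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */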


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
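
/*
 * A minimal caller sketch (illustrative only, cf. mesh_nexthop_resolve()
 * in mesh_hwmp.c): create the path, then look it up under RCU to queue
 * frames and start discovery:
 *
 *	if (mesh_path_add(dst, sdata) == 0) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(dst, sdata);
 *		// queue the frame on mpath->frame_queue and kick off
 *		// path discovery for it
 *		rcu_read_unlock();
 *	}
 */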

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
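
/*
 * An illustrative call chain (not part of this file): deletion unlinks
 * the node under the bucket lock and defers the actual free past an RCU
 * grace period, so concurrent readers never see freed memory:
 *
 *	__mesh_path_del()
 *	  -> hlist_del_rcu(&node->list)
 *	  -> call_rcu(&node->rcu, mesh_path_node_reclaim)
 *	       ... grace period elapses ...
 *	       -> del_timer_sync() + kfree(mpath) + kfree(node)
 */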

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
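
/*
 * An illustrative walk-through (not part of this file): with three
 * active gates g1, g2, g3, the first loop above does
 *
 *	mesh_path_move_to_queue(g1, mpath, false);	// move
 *	mesh_path_move_to_queue(g2, g1, true);		// copy
 *	mesh_path_move_to_queue(g3, g2, true);		// copy
 *
 * so every gate queue ends up with the pending frames, and the second
 * loop flushes each gate's queue with mesh_path_tx_pending().
 */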

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
1121