/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* Initially there will be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

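/*
 * Illustrative sketch, not part of the original file: the grow threshold
 * used by mesh_path_add() and mesh_table_grow() below.  With
 * INIT_PATHS_SIZE_ORDER == 2 the initial table has 1 << 2 == 4 buckets,
 * so with MEAN_CHAIN_LEN == 2 a grow is requested once the table holds
 * mean_chain_len * (hash_mask + 1) == 8 entries.
 */
#if 0	/* example only */
static inline bool mesh_table_needs_grow(struct mesh_table *tbl)
{
	return atomic_read(&tbl->entries) >=
	       tbl->mean_chain_len * (tbl->hash_mask + 1);
}
#endif
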
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock has the table-grow function as its writer and node add/delete
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires that we take
 * the read lock or we risk operating on an old table.  The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)

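/*
 * Illustrative sketch, not part of the original file: iterating every
 * entry of the current table under RCU.  The table pointer is dereferenced
 * once, outside the macro, as the comment above requires.
 */
#if 0	/* example only */
static int mesh_path_count_sketch(void)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	int i, n = 0;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i)
		n++;
	rcu_read_unlock();
	return n;
}
#endif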

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}

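/*
 * Illustrative sketch, not part of the original file: the write-side
 * locking discipline described above pathtbl_resize_lock.  Writers that
 * add or delete nodes take the resize lock for reading plus the per-bucket
 * spinlock; plain lookups rely on RCU alone.
 */
#if 0	/* example only */
static void pathtbl_write_side_sketch(const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	u32 hash_idx;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(dst, sdata, tbl);
	spin_lock(&tbl->hashwlock[hash_idx]);
	/* ... insert or delete an mpath_node in this bucket ... */
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
}
#endif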

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

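/*
 * Illustrative usage sketch, not part of the original file: checking
 * whether a destination currently has a usable path.  The returned
 * mesh_path is only valid inside the RCU read-side section.
 */
#if 0	/* example only */
static bool path_is_resolved_sketch(struct ieee80211_sub_if_data *sdata,
				    const u8 *dst)
{
	struct mesh_path *mpath;
	bool active;

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, dst);
	active = mpath && (mpath->flags & MESH_PATH_ACTIVE);
	rcu_read_unlock();
	return active;
}
#endif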

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside an rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new or preexisting mesh path structure on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}

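/*
 * Illustrative usage sketch, not part of the original file: creating a
 * path on demand before starting discovery.  mesh_path_add() returns the
 * preexisting entry if the destination is already in the table, so callers
 * need not look up first.  Kicking off discovery (e.g. HWMP's
 * mesh_queue_preq()) is left as a comment since it lives elsewhere.
 */
#if 0	/* example only */
static int ensure_path_sketch(struct ieee80211_sub_if_data *sdata,
			      const u8 *dst)
{
	struct mesh_path *mpath = mesh_path_add(sdata, dst);

	if (IS_ERR(mpath))
		return PTR_ERR(mpath);
	/* ... start path discovery for mpath ... */
	return 0;
}
#endif
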
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

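/*
 * Illustrative sketch, not part of the original file: how a caller that
 * tracks delivery failures might decide to invoke mesh_plink_broken().
 * The threshold and the failure counter are hypothetical, not fields of
 * the real struct sta_info.
 */
#if 0	/* example only */
static void tx_status_sketch(struct sta_info *sta, bool acked,
			     unsigned int *fail_count)
{
	const unsigned int fail_threshold = 32;	/* hypothetical tuning */

	if (acked)
		*fail_count = 0;
	else if (++(*fail_count) >= fail_threshold)
		mesh_plink_broken(sta);
}
#endif
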
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

951 
952 /**
953  * mesh_path_discard_frame - discard a frame whose path could not be resolved
954  *
955  * @skb: frame to discard
956  * @sdata: network subif the frame was to be sent through
957  *
958  * Locking: the function must me called within a rcu_read_lock region
959  */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

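/*
 * Illustrative usage sketch, not part of the original file: pinning a
 * static next hop for a destination, roughly the shape of what the
 * cfg80211 mpath handlers do.  The helper name is hypothetical.
 */
#if 0	/* example only */
static int set_static_path_sketch(struct ieee80211_sub_if_data *sdata,
				  const u8 *dst, struct sta_info *next_hop)
{
	struct mesh_path *mpath = mesh_path_add(sdata, dst);

	if (IS_ERR(mpath))
		return PTR_ERR(mpath);
	mesh_path_fix_nexthop(mpath, next_hop);
	return 0;
}
#endif
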
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

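/*
 * Illustrative sketch, not part of the original file: the lifecycle of
 * the path tables.  mesh_pathtbl_init() is expected to run once at
 * subsystem init and mesh_pathtbl_unregister() once at exit, with all
 * add/lookup/del calls in between.
 */
#if 0	/* example only */
static int pathtbl_lifecycle_sketch(void)
{
	int ret = mesh_pathtbl_init();

	if (ret)
		return ret;
	/* ... mesh_path_add(), mesh_path_lookup(), mesh_path_del() ... */
	mesh_pathtbl_unregister();
	return 0;
}
#endif
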
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
1110