xref: /openbmc/linux/net/mac80211/mesh_pathtbl.c (revision a09d2831)
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

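/*
 * A path only counts as expired while it is still marked active: paths
 * pinned with MESH_PATH_FIXED never expire, and lookups that notice the
 * expiry clear MESH_PATH_ACTIVE themselves.
 */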
#define MPATH_EXPIRED(mpath) (((mpath)->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, (mpath)->exp_time) && \
				!((mpath)->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP & MAP */

int mesh_paths_generation;

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

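/*
 * Allocate a table of twice the current size and copy every node across.
 * Returns the new table, or NULL if the table does not need to grow yet
 * or an allocation failed; the caller is responsible for publishing the
 * new table and freeing the old one after an RCU grace period.
 */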
static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
	struct mesh_table *newtbl;
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&tbl->entries)
			< tbl->mean_chain_len * (tbl->hash_mask + 1))
		goto endgrow;

	newtbl = mesh_table_alloc(tbl->size_order + 1);
	if (!newtbl)
		goto endgrow;

	newtbl->free_node = tbl->free_node;
	newtbl->mean_chain_len = tbl->mean_chain_len;
	newtbl->copy_node = tbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

	oldhash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (tbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return newtbl;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			tbl->free_node(p, 0);
	}
	__mesh_table_free(newtbl);
endgrow:
	return NULL;
}

/* The grow function takes this lock as a writer; path additions and
 * deletions take it as readers. Plain lookups never take it: readers
 * walking the table are protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

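	/*
	 * Frames queued while the path was unresolved still carry a stale
	 * receiver address; rewrite addr1 so they head for the new next hop.
	 */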
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
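			/*
			 * Check expiry locklessly first, then re-check under
			 * state_lock before clearing the active flag.
			 */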
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

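/*
 * Example caller of mesh_path_lookup() (a sketch, not lifted from an
 * in-tree user); dst, sdata and mpath are assumed to be in scope:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		mesh_path_tx_pending(mpath);
 *	rcu_read_unlock();
 */

/**
 * mpp_path_lookup - look up a proxy path in the mesh proxy-path table
 * @dst: hardware address (ETH_ALEN length) of the final destination
 * @sdata: local subif
 *
 * Same semantics as mesh_path_lookup(), but searches the table of proxy
 * paths (MPP & MAP) instead of the table of mesh destinations.
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */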
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index of the path in the table
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

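/*
 * Example caller of mesh_path_lookup_by_idx() (sketch only; idx, sdata and
 * next_hop are assumed to be in scope): walking paths for a dump. The index
 * is not stable across table changes, which is why add/delete bump
 * mesh_paths_generation, letting dump code detect concurrent modification.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup_by_idx(idx, sdata);
 *	if (mpath && mpath->next_hop)
 *		memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
 *	rcu_read_unlock();
 */
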
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success, or a negative errno on failure
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

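	/* taken for read: additions may run concurrently with each other,
	 * but never with a table resize */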
	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

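	/* publish: from here on the new path is visible to RCU lookups */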
	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &ifmsh->work);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

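/*
 * Example caller of mesh_path_add() (a sketch in the spirit of the HWMP
 * code, not a copy of it): create a path on demand when a frame needs a
 * route that does not exist yet.
 *
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		if (mesh_path_add(dst, sdata) != 0)
 *			return -ENOSPC;
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 */
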
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mesh_paths;
	newtbl = mesh_table_grow(mesh_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mesh_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

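	/*
	 * Wait for all readers of the old table to finish, then release it.
	 * The mesh_path leafs are shared with the new table through the
	 * mpath_node indirection, so only the old node wrappers are freed.
	 */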
	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	newtbl = mesh_table_grow(mpp_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

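/**
 * mpp_path_add - allocate and add a new proxy path to the proxy-path table
 * @dst: destination address of the proxied station (ETH_ALEN length)
 * @mpp: address of the mesh proxy through which @dst can be reached
 * @sdata: local subif
 *
 * Returns: 0 on success, or a negative errno on failure
 */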
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &ifmsh->work);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivate paths and send PERRs when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(MESH_TTL, mpath->dst,
					cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read-side critical
 * section will be protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

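/*
 * RCU callback: runs after a grace period, once no lookup can still be
 * holding a reference to this node or its mesh_path.
 */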
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success, -ENXIO if no matching path was found
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(MESH_TTL, da, cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* drain unconditionally: gating on MESH_PATH_ACTIVE here would leak
	 * the already-dequeued skb when the flag is cleared mid-loop */
	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called holding mpath->state_lock;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}